index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/binary/EVCacheNodeImpl.java
package net.spy.memcached.protocol.binary; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.nio.channels.SocketChannel; import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.atomic.AtomicInteger; import javax.management.MBeanServer; import javax.management.ObjectName; import org.joda.time.format.ISODateTimeFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.evcache.EVCache; import com.netflix.evcache.pool.EVCacheClient; import com.netflix.evcache.pool.ServerGroup; import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.Tag; import net.spy.memcached.ConnectionFactory; import net.spy.memcached.EVCacheNode; import net.spy.memcached.EVCacheNodeMBean; import net.spy.memcached.ops.Operation; //import sun.misc.Cleaner; //import sun.nio.ch.DirectBuffer; @SuppressWarnings("restriction") @edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "FCBL_FIELD_COULD_BE_LOCAL", "EXS_EXCEPTION_SOFTENING_NO_CHECKED", "REC_CATCH_EXCEPTION", "SCII_SPOILED_CHILD_INTERFACE_IMPLEMENTATOR" }) public class EVCacheNodeImpl extends BinaryMemcachedNodeImpl implements EVCacheNodeMBean, EVCacheNode { private static final Logger log = LoggerFactory.getLogger(EVCacheNodeImpl.class); protected long stTime; protected final String hostName; protected final BlockingQueue<Operation> readQ; protected final BlockingQueue<Operation> inputQueue; protected final EVCacheClient client; //protected Counter reconnectCounter; private final AtomicInteger numOps = new AtomicInteger(0); private long timeoutStartTime; protected final Counter operationsCounter; public EVCacheNodeImpl(SocketAddress sa, SocketChannel c, int bufSize, BlockingQueue<Operation> rq, BlockingQueue<Operation> wq, BlockingQueue<Operation> iq, long opQueueMaxBlockTimeMillis, boolean waitForAuth, long dt, long at, ConnectionFactory fa, EVCacheClient 
client, long stTime) { super(sa, c, bufSize, rq, wq, iq, Long.valueOf(opQueueMaxBlockTimeMillis), waitForAuth, dt, at, fa); this.client = client; final String appName = client.getAppName(); this.readQ = rq; this.inputQueue = iq; this.hostName = ((InetSocketAddress) getSocketAddress()).getHostName(); // final List<Tag> tagsCounter = new ArrayList<Tag>(5); // tagsCounter.add(new BasicTag(EVCacheMetricsFactory.CACHE, client.getAppName())); // tagsCounter.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, client.getServerGroupName())); // tagsCounter.add(new BasicTag(EVCacheMetricsFactory.ZONE, client.getZone())); //tagsCounter.add(new BasicTag(EVCacheMetricsFactory.HOST, hostName)); //TODO : enable this and see what is the impact this.operationsCounter = client.getOperationCounter(); setConnectTime(stTime); setupMonitoring(appName); } private String getMonitorName(String appName) { return "com.netflix.evcache:Group=" + appName + ",SubGroup=pool" + ",SubSubGroup=" + client.getServerGroupName() + ",SubSubSubGroup=" + client.getId() + ",SubSubSubSubGroup=" + hostName + "_" + stTime; } private void setupMonitoring(String appName) { try { final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(appName)); final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); if (mbeanServer.isRegistered(mBeanName)) { if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. 
Will unregister the previous instance and register a new one."); mbeanServer.unregisterMBean(mBeanName); } mbeanServer.registerMBean(this, mBeanName); } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e); } } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#registerMonitors() */ @Override public void registerMonitors() { // try { // EVCacheMetricsFactory.getInstance().getRegistry().register(this); // } catch (Exception e) { // if (log.isWarnEnabled()) log.warn("Exception while registering.", e); // } } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#isAvailable(com.netflix.evcache.EVCache.Call) */ @Override public boolean isAvailable(EVCache.Call call) { return isActive(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getWriteQueueSize() */ @Override public int getWriteQueueSize() { return writeQ.size(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getReadQueueSize() */ @Override public int getReadQueueSize() { return readQ.size(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getInputQueueSize() */ @Override public int getInputQueueSize() { return inputQueue.size(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#incrOps() */ @Override public long incrOps() { operationsCounter.increment(); return numOps.incrementAndGet(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getNumOfOps() */ @Override public long getNumOfOps() { return numOps.get(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#flushInputQueue() */ @Override public void flushInputQueue() { inputQueue.clear(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getStartTime() */ @Override public long getStartTime() { return stTime; } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getTimeoutStartTime() */ 
@Override public long getTimeoutStartTime() { return timeoutStartTime; } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#removeMonitoring() */ @Override public void removeMonitoring() { try { final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(client.getAppName())); final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); if (mbeanServer.isRegistered(mBeanName)) { if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one."); mbeanServer.unregisterMBean(mBeanName); } } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e); } } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#shutdown() */ @Override public void shutdown() { removeMonitoring(); writeQ.clear(); readQ.clear(); inputQueue.clear(); try { // Cleanup the ByteBuffers only if they are sun.nio.ch.DirectBuffer // If we don't cleanup then we will leak 16K of memory // if (getRbuf() instanceof DirectBuffer) { // Cleaner cleaner = ((DirectBuffer) getRbuf()).cleaner(); // if (cleaner != null) cleaner.clean(); // cleaner = ((DirectBuffer) getWbuf()).cleaner(); // if (cleaner != null) cleaner.clean(); // } } catch (Throwable t) { getLogger().error("Exception cleaning ByteBuffer.", t); } } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getCreateTime() */ @Override public long getCreateTime() { return stTime; } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#setConnectTime(long) */ @Override public void setConnectTime(long cTime) { this.stTime = cTime; // if(reconnectCounter == null) { // final List<Tag> tags = new ArrayList<Tag>(5); // tags.add(new BasicTag(EVCacheMetricsFactory.CACHE, client.getAppName())); // tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, client.getServerGroupName())); // tags.add(new BasicTag(EVCacheMetricsFactory.ZONE, 
client.getZone())); // tags.add(new BasicTag(EVCacheMetricsFactory.HOST, hostName)); // tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.RECONNECT)); // this.reconnectCounter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_RECONNECT, tags); // // } // reconnectCounter.increment(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getAppName() */ @Override public String getAppName() { return client.getAppName(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getHostName() */ @Override public String getHostName() { return hostName; } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getServerGroup() */ @Override public ServerGroup getServerGroup() { return client.getServerGroup(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getId() */ @Override public int getId() { return client.getId(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getTags() */ @Override public List<Tag> getTags() { return client.getTagList(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getTotalReconnectCount() */ @Override public int getTotalReconnectCount() { // if(reconnectCounter == null) return 0; // return (int)reconnectCounter.count(); return getReconnectCount(); } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getSocketChannelLocalAddress() */ @Override public String getSocketChannelLocalAddress() { try { if(getChannel() != null) { return getChannel().getLocalAddress().toString(); } } catch (IOException e) { log.error("Exception", e); } return "NULL"; } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getSocketChannelRemoteAddress() */ @Override public String getSocketChannelRemoteAddress() { try { if(getChannel() != null) { return getChannel().getRemoteAddress().toString(); } } catch (IOException e) { log.error("Exception", e); } 
return "NULL"; } /* (non-Javadoc) * @see net.spy.memcached.protocol.binary.EVCacheNode1#getConnectTime() */ @Override public String getConnectTime() { return ISODateTimeFormat.dateTime().print(stTime); } @Override public EVCacheClient getEVCacheClient() { return client; } }
4,000
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheImpl.java
package com.netflix.evcache; import static com.netflix.evcache.util.Sneaky.sneakyThrow; import java.lang.management.ManagementFactory; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Function; import java.util.stream.Collectors; import javax.management.MBeanServer; import javax.management.ObjectName; import com.netflix.evcache.dto.KeyMapDto; import com.netflix.evcache.util.EVCacheBulkDataDto; import com.netflix.evcache.util.KeyHasher; import com.netflix.evcache.util.RetryCount; import com.netflix.evcache.util.Sneaky; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.archaius.api.Property; import com.netflix.archaius.api.PropertyRepository; import com.netflix.evcache.EVCacheInMemoryCache.DataNotFoundException; import com.netflix.evcache.EVCacheLatch.Policy; import com.netflix.evcache.event.EVCacheEvent; import com.netflix.evcache.event.EVCacheEventListener; import com.netflix.evcache.metrics.EVCacheMetricsFactory; import com.netflix.evcache.operation.EVCacheFuture; import com.netflix.evcache.operation.EVCacheItem; import com.netflix.evcache.operation.EVCacheItemMetaData; import com.netflix.evcache.operation.EVCacheLatchImpl; import com.netflix.evcache.operation.EVCacheOperationFuture; import com.netflix.evcache.pool.ChunkTranscoder; import com.netflix.evcache.pool.EVCacheClient; import com.netflix.evcache.pool.EVCacheClientPool; import com.netflix.evcache.pool.EVCacheClientPoolManager; import com.netflix.evcache.pool.EVCacheClientUtil; import 
com.netflix.evcache.pool.EVCacheValue; import com.netflix.evcache.pool.ServerGroup; import com.netflix.spectator.api.BasicTag; import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.DistributionSummary; import com.netflix.spectator.api.Tag; import com.netflix.spectator.api.Timer; import net.spy.memcached.CachedData; import net.spy.memcached.transcoders.Transcoder; import rx.Observable; import rx.Scheduler; import rx.Single; /** * An implementation of a ephemeral volatile cache. * * @author smadappa * @version 2.0 */ @SuppressWarnings("unchecked") @edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "WMI_WRONG_MAP_ITERATOR", "DB_DUPLICATE_BRANCHES", "REC_CATCH_EXCEPTION","RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" }) public class EVCacheImpl implements EVCache, EVCacheImplMBean { private static final Logger log = LoggerFactory.getLogger(EVCacheImpl.class); private final String _appName; private final String _cacheName; private final String _metricPrefix; protected final Transcoder<?> _transcoder; private final boolean _zoneFallback; private final boolean _throwException; private final int _timeToLive; // defaults to 15 minutes protected EVCacheClientPool _pool; private final Property<Boolean> _throwExceptionFP, _zoneFallbackFP, _useInMemoryCache; private final Property<Boolean> _bulkZoneFallbackFP; private final Property<Boolean> _bulkPartialZoneFallbackFP; private final List<Tag> tags; private EVCacheInMemoryCache<?> cache; private EVCacheClientUtil clientUtil = null; private final Property<Boolean> ignoreTouch; private final Property<Boolean> hashKey; private final Property<String> hashingAlgo; private final Property<Boolean> shouldEncodeHashKey; private final Property<Integer> maxDigestBytes; private final Property<Integer> maxHashLength; private final EVCacheTranscoder evcacheValueTranscoder; private final Property<Integer> maxReadDuration, maxWriteDuration; protected final EVCacheClientPoolManager 
_poolManager; private final Map<String, Timer> timerMap = new ConcurrentHashMap<String, Timer>(); private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>(); private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>(); private final Property<Boolean> _eventsUsingLatchFP, autoHashKeys; private DistributionSummary bulkKeysSize = null; private final Property<Integer> maxKeyLength; private final Property<String> alias; private final Property<String> encoderBase; EVCacheImpl(String appName, String cacheName, int timeToLive, Transcoder<?> transcoder, boolean enableZoneFallback, boolean throwException, EVCacheClientPoolManager poolManager) { this._appName = appName; this._cacheName = cacheName; if(_cacheName != null && _cacheName.length() > 0) { for(int i = 0; i < cacheName.length(); i++) { if(Character.isWhitespace(cacheName.charAt(i))){ throw new IllegalArgumentException("Cache Prefix ``" + cacheName + "`` contains invalid character at position " + i ); } } } this._timeToLive = timeToLive; this._transcoder = transcoder; this._zoneFallback = enableZoneFallback; this._throwException = throwException; tags = new ArrayList<Tag>(3); EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName); if(_cacheName != null && _cacheName.length() > 0) tags.add(new BasicTag(EVCacheMetricsFactory.PREFIX, _cacheName)); final String _metricName = (_cacheName == null) ? _appName : _appName + "." 
+ _cacheName; _metricPrefix = _appName + "-"; this._poolManager = poolManager; this._pool = poolManager.getEVCacheClientPool(_appName); final PropertyRepository propertyRepository = poolManager.getEVCacheConfig().getPropertyRepository(); _throwExceptionFP = propertyRepository.get(_metricName + ".throw.exception", Boolean.class).orElseGet(_appName + ".throw.exception").orElse(false); _zoneFallbackFP = propertyRepository.get(_metricName + ".fallback.zone", Boolean.class).orElseGet(_appName + ".fallback.zone").orElse(true); _bulkZoneFallbackFP = propertyRepository.get(_appName + ".bulk.fallback.zone", Boolean.class).orElse(true); _bulkPartialZoneFallbackFP = propertyRepository.get(_appName+ ".bulk.partial.fallback.zone", Boolean.class).orElse(true); if(_cacheName == null) { _useInMemoryCache = propertyRepository.get(_appName + ".use.inmemory.cache", Boolean.class).orElseGet("evcache.use.inmemory.cache").orElse(false); } else { _useInMemoryCache = propertyRepository.get(_appName + "." + _cacheName + ".use.inmemory.cache", Boolean.class).orElseGet(_appName + ".use.inmemory.cache").orElseGet("evcache.use.inmemory.cache").orElse(false); } _eventsUsingLatchFP = propertyRepository.get(_appName + ".events.using.latch", Boolean.class).orElseGet("evcache.events.using.latch").orElse(false); maxReadDuration = propertyRepository.get(_appName + ".max.read.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(20); maxWriteDuration = propertyRepository.get(_appName + ".max.write.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(50); ignoreTouch = propertyRepository.get(appName + ".ignore.touch", Boolean.class).orElse(false); this.hashKey = propertyRepository.get(appName + ".hash.key", Boolean.class).orElse(false); this.hashingAlgo = propertyRepository.get(appName + ".hash.algo", String.class).orElse("siphash24"); this.shouldEncodeHashKey = propertyRepository.get(appName + ".hash.encode", 
Boolean.class).orElse(true); this.maxDigestBytes = propertyRepository.get(appName + ".max.digest.bytes", Integer.class).orElse(-1); this.maxHashLength = propertyRepository.get(appName + ".max.hash.length", Integer.class).orElse(-1); this.encoderBase = propertyRepository.get(appName + ".hash.encoder", String.class).orElse("base64"); this.autoHashKeys = propertyRepository.get(_appName + ".auto.hash.keys", Boolean.class).orElseGet("evcache.auto.hash.keys").orElse(false); this.evcacheValueTranscoder = new EVCacheTranscoder(); evcacheValueTranscoder.setCompressionThreshold(Integer.MAX_VALUE); // default max key length is 200, instead of using what is defined in MemcachedClientIF.MAX_KEY_LENGTH (250). This is to accommodate // auto key prepend with appname for duet feature. this.maxKeyLength = propertyRepository.get(_appName + ".max.key.length", Integer.class).orElseGet("evcache.max.key.length").orElse(200); // if alias changes, refresh my pool to point to the correct alias app this.alias = propertyRepository.get("EVCacheClientPoolManager." + appName + ".alias", String.class); this.alias.subscribe(i -> { this._pool = poolManager.getEVCacheClientPool(_appName); }); _pool.pingServers(); setupMonitoring(); } private void setupMonitoring() { try { final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=" + _appName + ",SubGroup=Impl"); final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); if (mbeanServer.isRegistered(mBeanName)) { if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. 
Will unregister the previous instance and register a new one."); mbeanServer.unregisterMBean(mBeanName); } mbeanServer.registerMBean(this, mBeanName); } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Exception", e); } } EVCacheKey getEVCacheKey(final String key) { if(key == null || key.length() == 0) throw new NullPointerException("Key cannot be null or empty"); for(int i = 0; i < key.length(); i++) { if(Character.isWhitespace(key.charAt(i))){ throw new IllegalArgumentException("key ``" + key + "`` contains invalid character at position " + i ); } } final String canonicalKey; if (this._cacheName == null) { canonicalKey = key; } else { final int keyLength = _cacheName.length() + 1 + key.length(); canonicalKey = new StringBuilder(keyLength).append(_cacheName).append(':').append(key).toString(); } if (canonicalKey.length() > this.maxKeyLength.get() && !hashKey.get() && !autoHashKeys.get()) { throw new IllegalArgumentException("Key is too long (maxlen = " + this.maxKeyLength.get() + ')'); } boolean shouldHashKeyAtAppLevel = hashKey.get() || (canonicalKey.length() > this.maxKeyLength.get() && autoHashKeys.get()); final EVCacheKey evcKey = new EVCacheKey(_appName, key, canonicalKey, shouldHashKeyAtAppLevel ? 
KeyHasher.getHashingAlgorithmFromString(hashingAlgo.get()) : null, this.shouldEncodeHashKey, this.maxDigestBytes, this.maxHashLength, this.encoderBase.get()); if (log.isDebugEnabled() && shouldLog()) log.debug("Key : " + key + "; EVCacheKey : " + evcKey); return evcKey; } private boolean hasZoneFallbackForBulk() { if (!_pool.supportsFallback()) return false; if (!_bulkZoneFallbackFP.get()) return false; return _zoneFallback; } private boolean hasZoneFallback() { if (!_pool.supportsFallback()) return false; if (!_zoneFallbackFP.get().booleanValue()) return false; return _zoneFallback; } private boolean shouldLog() { return _poolManager.shouldLog(_appName); } private boolean doThrowException() { return (_throwException || _throwExceptionFP.get().booleanValue()); } private List<EVCacheEventListener> getEVCacheEventListeners() { return _poolManager.getEVCacheEventListeners(); } private EVCacheEvent createEVCacheEvent(Collection<EVCacheClient> clients, Call call) { final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners(); if (evcacheEventListenerList == null || evcacheEventListenerList.size() == 0) return null; final EVCacheEvent event = new EVCacheEvent(call, _appName, _cacheName, _pool); event.setClients(clients); return event; } private boolean shouldThrottle(EVCacheEvent event) { for (EVCacheEventListener evcacheEventListener : getEVCacheEventListeners()) { try { if (evcacheEventListener.onThrottle(event)) { return true; } } catch(Exception e) { incrementEventFailure("throttle", event.getCall(), evcacheEventListener.getClass().getName()); if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing throttle event on listener " + evcacheEventListener + " for event " + event, e); } } return false; } private void startEvent(EVCacheEvent event) { final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners(); for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) { try { 
evcacheEventListener.onStart(event); } catch(Exception e) { incrementEventFailure("start", event.getCall(), evcacheEventListener.getClass().getName()); if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing start event on listener " + evcacheEventListener + " for event " + event, e); } } } private void endEvent(EVCacheEvent event) { event.setEndTime(System.currentTimeMillis()); final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners(); for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) { try { evcacheEventListener.onComplete(event); } catch(Exception e) { incrementEventFailure("end", event.getCall(), evcacheEventListener.getClass().getName()); if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing end event on listener " + evcacheEventListener + " for event " + event, e); } } } private void eventError(EVCacheEvent event, Throwable t) { event.setEndTime(System.currentTimeMillis()); final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners(); for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) { try { evcacheEventListener.onError(event, t); } catch(Exception e) { incrementEventFailure("error", event.getCall(), evcacheEventListener.getClass().getName()); if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing error event on listener " + evcacheEventListener + " for event " + event, e); } } } private <T> EVCacheInMemoryCache<T> getInMemoryCache(Transcoder<T> tc) { if (cache == null) cache = _poolManager.createInMemoryCache(tc, this); return (EVCacheInMemoryCache<T>) cache; } public <T> T get(String key) throws EVCacheException { return this.get(key, (Transcoder<T>) _transcoder); } private void incrementFastFail(String metric, Call call) { final String name = metric + call.name(); Counter counter = counterMap.get(name); if(counter == null) { final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3); 
tagList.addAll(tags); if(call != null) { final String operation = call.name(); final String operationType; switch(call) { case GET: case GET_AND_TOUCH: case GETL: case BULK: case COMPLETABLE_FUTURE_GET: case COMPLETABLE_FUTURE_GET_BULK: case ASYNC_GET: operationType = EVCacheMetricsFactory.READ; break; default : operationType = EVCacheMetricsFactory.WRITE; } if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation)); if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType)); } tagList.add(new BasicTag(EVCacheMetricsFactory.FAILURE_REASON, metric)); counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.FAST_FAIL, tagList); counterMap.put(name, counter); } counter.increment(); } private void incrementEventFailure(String metric, Call call, String event) { final String name = metric + call.name() + event; Counter counter = counterMap.get(name); if(counter == null) { final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3); tagList.addAll(tags); if(call != null) { final String operation = call.name(); final String operationType; switch(call) { case GET: case GET_AND_TOUCH: case GETL: case BULK: case ASYNC_GET: operationType = EVCacheMetricsFactory.READ; break; default : operationType = EVCacheMetricsFactory.WRITE; } if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation)); if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType)); } tagList.add(new BasicTag(EVCacheMetricsFactory.EVENT_STAGE, metric)); tagList.add(new BasicTag(EVCacheMetricsFactory.EVENT, event)); counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_EVENT_FAIL, tagList); counterMap.put(name, counter); } counter.increment(); } private void incrementFailure(String metric, String operation, String operationType) { final String name = metric + operation; Counter counter = 
counterMap.get(name); if(counter == null) { final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3); tagList.addAll(tags); if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation)); if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType)); tagList.add(new BasicTag(EVCacheMetricsFactory.FAILURE_REASON, metric)); counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_FAIL, tagList); counterMap.put(name, counter); } counter.increment(); } public <T> T get(String key, Transcoder<T> tc) throws EVCacheException { if (null == key) throw new IllegalArgumentException("Key cannot be null"); final EVCacheKey evcKey = getEVCacheKey(key); if (_useInMemoryCache.get()) { T value = null; try { final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc; value = (T) getInMemoryCache(transcoder).get(evcKey); } catch (ExecutionException e) { final boolean throwExc = doThrowException(); if(throwExc) { if(e.getCause() instanceof DataNotFoundException) { return null; } if(e.getCause() instanceof EVCacheException) { if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e); throw (EVCacheException)e.getCause(); } throw new EVCacheException("ExecutionException", e); } } if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : "")); if (value != null) { if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? 
"; value : " + value : "")); return value; } else { if (log.isInfoEnabled() && shouldLog()) log.info("Value not_found in inmemory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value ); } } return doGet(evcKey, tc); } <T> T doGet(EVCacheKey evcKey , Transcoder<T> tc) throws EVCacheException { final boolean throwExc = doThrowException(); EVCacheClient client = _pool.getEVCacheClientForRead(); if (client == null) { incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET); if (throwExc) throw new EVCacheException("Could not find a client to get the data APP " + _appName); return null; // Fast failure } final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET); if (event != null) { event.setEVCacheKeys(Arrays.asList(evcKey)); try { if (shouldThrottle(event)) { incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET); if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey); return null; } } catch(EVCacheException ex) { if(throwExc) throw ex; incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET); return null; } startEvent(event); } final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime(); String status = EVCacheMetricsFactory.SUCCESS; String cacheOperation = EVCacheMetricsFactory.YES; int tries = 1; try { final boolean hasZF = hasZoneFallback(); boolean throwEx = hasZF ? 
false : throwExc; T data = getData(client, evcKey, tc, throwEx, hasZF); if (data == null && hasZF) { final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup()); if (fbClients != null && !fbClients.isEmpty()) { for (int i = 0; i < fbClients.size(); i++) { final EVCacheClient fbClient = fbClients.get(i); if(i >= fbClients.size() - 1) throwEx = throwExc; if (event != null) { try { if (shouldThrottle(event)) { status = EVCacheMetricsFactory.THROTTLED; if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey); return null; } } catch(EVCacheException ex) { if(throwExc) throw ex; status = EVCacheMetricsFactory.THROTTLED; return null; } } tries++; data = getData(fbClient, evcKey, tc, throwEx, (i < fbClients.size() - 1) ? true : false); if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup()); if (data != null) { client = fbClient; break; } } } } if (data != null) { if (event != null) event.setAttribute("status", "GHIT"); } else { cacheOperation = EVCacheMetricsFactory.NO; if (event != null) event.setAttribute("status", "GMISS"); if (log.isInfoEnabled() && shouldLog()) log.info("GET : APP " + _appName + " ; cache miss for key : " + evcKey); } if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? 
// ---- continuation of get(): result logging, error translation and latency metric ----
// The value itself is logged only at TRACE level; timeouts are rethrown as an
// EVCacheException carrying a hint about the readTimeout property, and the finally
// block always records the call timer.
"], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
            if (event != null) endEvent(event);
            return data;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting data for APP " + _appName + ", key = " + evcKey
                    + ".\nYou can set the following property to increase the timeout " + _appName
                    + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting data for APP " + _appName + ", key = " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.GET.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }

    /**
     * Asynchronous get: first consults the optional in-memory (near) cache, then
     * falls back to the remote fetch ({@link #doAsyncGet}) only on a local miss.
     *
     * @param key the key to fetch; must not be null
     * @param tc  transcoder used to decode the value (may be null; defaults are applied downstream)
     * @return a future completing with the value, or null on a miss
     */
    public <T> CompletableFuture<T> getAsync(String key, Transcoder<T> tc) {
        if (null == key) throw new IllegalArgumentException("Key cannot be null");
        final EVCacheKey evcKey = getEVCacheKey(key);
        return getAsyncInMemory(evcKey, tc)
                .thenCompose(data -> data == null ? doAsyncGet(evcKey, tc) : CompletableFuture.completedFuture(data));
    }

    /** Asynchronous get using the app-level default transcoder. */
    public <T> CompletableFuture<T> getAsync(String key) {
        return this.getAsync(key, (Transcoder<T>) _transcoder);
    }

    /**
     * Looks the key up in the optional in-memory cache. Returns null when the near
     * cache is disabled or holds no value; lookup failures are routed through
     * handleInMemoryException.
     */
    private <T> T getInMemory(EVCacheKey evcKey, Transcoder<T> tc) throws Exception {
        if (_useInMemoryCache.get()) {
            try {
                // Transcoder precedence: explicit argument > app default > read-client default.
                final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ?
                        (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                T value = getInMemoryCache(transcoder).get(evcKey);
                if (value != null) {
                    if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from in-memory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
                    return value;
                } else {
                    if (log.isInfoEnabled() && shouldLog()) log.info("Value not_found in in-memory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value );
                }
            } catch (Exception e) {
                return handleInMemoryException(e);
            }
        }
        return null;
    }

    /** Wraps getInMemory in a CompletableFuture so it can head an async pipeline. */
    private <T> CompletableFuture<T> getAsyncInMemory(EVCacheKey evcKey, Transcoder<T> tc) {
        CompletableFuture<T> promise = new CompletableFuture<>();
        try {
            if(log.isDebugEnabled() && shouldLog()) {
                log.debug("Retrieving value from memory {} ", evcKey.getKey());
            }
            T t = getInMemory(evcKey, tc);
            promise.complete(t);
        } catch (Exception ex) {
            promise.completeExceptionally(ex);
        }
        return promise;
    }

    /**
     * Policy for in-memory lookup failures: when exceptions are enabled, a
     * DataNotFoundException cause is treated as a miss (null), an EVCacheException
     * cause is rethrown as-is, and anything else is wrapped; when exceptions are
     * disabled every failure degrades to null (a miss).
     */
    private <T> T handleInMemoryException(Exception e) throws Exception {
        final boolean throwExc = doThrowException();
        if(throwExc) {
            if(e.getCause() instanceof DataNotFoundException) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("DataNotFoundException while getting data from InMemory Cache", e);
                return null;
            }
            if(e.getCause() instanceof EVCacheException) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data from InMemory Cache", e);
                throw e;
            } else {
                throw new EVCacheException("ExecutionException", e);
            }
        } else {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Throws Exception is false and returning null in this case");
            return null;
        }
    }

    /**
     * Remote async get with zone-fallback retries. Builds the client and event up
     * front (completing errorFuture exceptionally on fast-fail), then fetches and,
     * on a miss or failure, retries against fallback server groups via handleRetry.
     */
    private <T> CompletableFuture<T> doAsyncGet(EVCacheKey evcKey, Transcoder<T> tc) {
        CompletableFuture<T> errorFuture = new CompletableFuture<>();
        final boolean throwExc = doThrowException();
        //Building the client
        EVCacheClient client = buildEvCacheClient(throwExc, Call.COMPLETABLE_FUTURE_GET, errorFuture);
        if (errorFuture.isCompletedExceptionally() || client == null) {
            if (client == null ) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("client is null");
                errorFuture.complete(null);
            }
            return errorFuture;
        }
        if (log.isDebugEnabled() && shouldLog()) log.debug("Completed Building the client");
        //Building the start event
        EVCacheEvent event = buildAndStartEvent(client, Collections.singletonList(evcKey), throwExc, errorFuture, Call.COMPLETABLE_FUTURE_GET);
        if (errorFuture.isCompletedExceptionally()) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Error while building and starting the event");
            return errorFuture;
        }
        errorFuture.cancel(false);
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        StringBuilder status = new StringBuilder(EVCacheMetricsFactory.SUCCESS);
        StringBuilder cacheOperation = new StringBuilder(EVCacheMetricsFactory.YES);
        final boolean hasZF = hasZoneFallback();
        RetryCount retryCount = new RetryCount();
        // Only propagate exceptions when no zone fallback is available.
        boolean throwEx = !hasZF && throwExc;
        return getAsyncData(client, evcKey, tc)
                .thenCompose(data -> handleRetry(data, evcKey, tc, client, hasZF, throwExc, event, retryCount))
                .handle((data, ex) -> {
                    if (ex != null) {
                        handleMissData(event, evcKey, client, cacheOperation);
                        handleFinally(data, status, retryCount.get(), client, cacheOperation, start, Call.COMPLETABLE_FUTURE_GET);
                        handleException(ex, event);
                        if (throwEx) {
                            throw new RuntimeException(ex);
                        } else {
                            return null;
                        }
                    } else {
                        handleFinally(data, status, retryCount.get(), client, cacheOperation, start, Call.COMPLETABLE_FUTURE_GET);
                        handleData(data, event, evcKey, client, cacheOperation);
                        return data;
                    }
                });
    }

    /**
     * Picks a read client from the pool; when none is available it increments the
     * fast-fail counter, optionally completes the supplied future exceptionally
     * (when throwExc is set), and returns null.
     */
    private <T> EVCacheClient buildEvCacheClient(boolean throwExc, Call callType, CompletableFuture<T> completableFuture) {
        EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, callType);
            if (throwExc) completableFuture.completeExceptionally(new
EVCacheException("Could not find a client to get the data APP " + _appName));
            return null;
        }
        return client;
    }

    /**
     * Creates and starts an EVCacheEvent for the call. Throttled requests increment
     * the fast-fail counter and (when throwExc) complete the future exceptionally.
     * Returns null when events are disabled or the request was throttled.
     */
    private <T> EVCacheEvent buildAndStartEvent(EVCacheClient client, List<EVCacheKey> evcKeys, boolean throwExc, CompletableFuture<T> completableFuture, Call callType) {
        EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), callType);
        if (event != null) {
            event.setEVCacheKeys(evcKeys);
            if (shouldThrottle(event)) {
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, callType);
                if (throwExc) completableFuture.completeExceptionally(new EVCacheException("Request Throttled for app " + _appName + " & keys " + evcKeys));
                return null;
            }
            startEvent(event);
            return event;
        }
        return null;
    }

    /**
     * Records the bulk-read latency timer and the per-call key-count distribution
     * summary (the summary is lazily created on first use) for an async bulk get.
     */
    private <T> void handleBulkFinally(StringBuilder status, RetryCount tries, EVCacheClient client, StringBuilder cacheOperation, Collection<String> keys, Long start) {
        final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
        if (bulkKeysSize == null) {
            final List<Tag> tagList = new ArrayList<Tag>(4);
            tagList.addAll(tags);
            tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
            tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.READ));
            bulkKeysSize = EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.OVERALL_KEYS_SIZE, tagList);
        }
        bulkKeysSize.record(keys.size());
        getTimer(Call.COMPLETABLE_FUTURE_GET_BULK.name(), EVCacheMetricsFactory.READ, cacheOperation.toString(), status.toString(), tries.get(), maxReadDuration.get(), client.getServerGroup())
                .record(duration, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled() && shouldLog()) log.debug("ASYNC GET BULK : APP " + _appName + " Took " + duration + " milliSec to get the value for key " + keys);
    }

    /** Records the single-key async read latency timer and passes the data through unchanged. */
    private <T> T handleFinally(T data, StringBuilder status, Integer tries, EVCacheClient client, StringBuilder cacheOperation, Long start, Call call) {
        final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime() - start;
        getTimer(call.name(), EVCacheMetricsFactory.READ, cacheOperation.toString(), status.toString(), tries, maxReadDuration.get(), client.getServerGroup())
                .record(duration, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled() && shouldLog()) log.debug("GET ASYNC : APP " + _appName + ", Took " + duration + " milliSec.");
        return data;
    }

    /**
     * Classifies an async failure onto the event: a CheckedOperationTimeoutException
     * buried under RuntimeException -> ExecutionException is reported as TIMEOUT;
     * anything else is reported as ERROR.
     */
    private void handleException(Throwable ex, EVCacheEvent event) {
        if (ex.getCause() instanceof RuntimeException) {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Handling exception with cause ", ex.getCause());
            }
            Throwable runTimeCause = ex.getCause();
            if (runTimeCause.getCause() instanceof ExecutionException) {
                if (log.isDebugEnabled() && shouldLog()) {
                    log.debug("Handling ExecutionException with cause ",runTimeCause.getCause());
                }
                Throwable executionExceptionCause = runTimeCause.getCause();
                if (executionExceptionCause.getCause() instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
                    if (event != null) {
                        if (log.isDebugEnabled() && shouldLog()) {
                            log.debug("Setting Status as Timeout");
                        }
                        event.setStatus(EVCacheMetricsFactory.TIMEOUT);
                        eventError(event, ex);
                    }
                }
                return;
            }
        }
        if (event != null) {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Setting event as Error");
            }
            event.setStatus(EVCacheMetricsFactory.ERROR);
            eventError(event, ex);
        }
    }

    /** Marks the async get as a miss (GMISS) on the event/metrics and ends the event. */
    private <T> void handleMissData(EVCacheEvent event, EVCacheKey evcKey, EVCacheClient client, StringBuilder cacheOperation) {
        cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.NO);
        if (event != null) event.setAttribute("status", "GMISS");
        if (log.isInfoEnabled() && shouldLog()) log.info("GET ASYNC : APP " + _appName + " ; cache miss for key : " + evcKey);
        endEvent(null, evcKey, client, event);
    }

    /** Marks the async get as a hit (GHIT) on the event and ends the event. */
    private <T> void handleData(T data, EVCacheEvent event, EVCacheKey evcKey, EVCacheClient client, StringBuilder cacheOperation) {
        if (event != null) event.setAttribute("status", "GHIT");
        endEvent(data, evcKey, client, event);
    }

    /** Logs the completed async get (value only at TRACE) and ends the event if present. */
    private <T> void endEvent(T data, EVCacheKey evcKey, EVCacheClient client, EVCacheEvent event) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("COMPLETABLE FUTURE GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
        if (event != null) endEvent(event);
    }

    /**
     * Recursively walks the fallback clients starting at fbClientIndex and returns
     * the first non-null value; a failure or miss on one client advances to the
     * next. Completes with null once every fallback is exhausted.
     */
    private <T> CompletableFuture<T> handleRetries(List<EVCacheClient> fbClients, int fbClientIndex, EVCacheEvent event, EVCacheKey evcKey, Transcoder<T> tc, RetryCount retryCount) {
        if (fbClientIndex >= fbClients.size()) {
            return CompletableFuture.completedFuture(null);
        }
        if (log.isDebugEnabled() && shouldLog()) {
            log.debug("searching key in the server {}", fbClients.get(fbClientIndex).getServerGroup().getName());
        }
        CompletableFuture<T> future = getAsyncData(
                fbClients.get(fbClientIndex),
                event,
                evcKey,
                tc);
        int nextIndex = fbClientIndex + 1;
        retryCount.incr();
        return future.thenApply(s -> s != null ?
                        handleSuccessCompletion(s, evcKey, fbClients, fbClientIndex, retryCount) :
                        handleRetries(fbClients, nextIndex, event, evcKey, tc, retryCount))
                .exceptionally(t -> handleRetries(fbClients, nextIndex, event, evcKey, tc, retryCount))
                .thenCompose(Function.identity());
    }

    /** Wraps a successful fallback hit in a completed future (with debug logging). */
    public <T> CompletableFuture<T> handleSuccessCompletion(T s, EVCacheKey key, List<EVCacheClient> fbClients, int index, RetryCount retryCount) {
        if (log.isDebugEnabled() && shouldLog()) {
            log.debug("fetched the key {} from server {} and retry count {}", key.getKey(), fbClients.get(index).getServerGroup().getName(), retryCount.get());
        }
        return CompletableFuture.completedFuture(s);
    }

    /**
     * On a primary miss with zone fallback enabled, retries the read against the
     * other server groups; otherwise returns the data as-is.
     */
    private <T> CompletableFuture<T> handleRetry(T data, EVCacheKey evcKey, Transcoder<T> tc, EVCacheClient client, boolean hasZF, boolean throwExc, EVCacheEvent event, RetryCount retryCount) {
        if (data == null && hasZF) {
            final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
            return
handleRetries(fbClients, 0, event, evcKey, tc, retryCount);
        }
        return CompletableFuture.completedFuture(data);
    }

    /** Public meta-debug entry point; the key is treated as an original (un-hashed) key. */
    public EVCacheItemMetaData metaDebug(String key) throws EVCacheException {
        return this.metaDebugInternal(key, false);
    }

    /**
     * Fetches item metadata (meta debug) for the key, with throttling checks,
     * zone-fallback retries across the other server groups, event bookkeeping and
     * a latency timer recorded in the finally block.
     *
     * @param key                 key to inspect; must not be null
     * @param isOriginalKeyHashed true when the caller already passes the hashed key
     */
    protected EVCacheItemMetaData metaDebugInternal(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException("Key cannot be null");
        final EVCacheKey evcKey = getEVCacheKey(key);
        final boolean throwExc = doThrowException();
        EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.META_DEBUG);
            if (throwExc) throw new EVCacheException("Could not find a client to get the metadata for APP " + _appName);
            return null; // Fast failure
        }
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.META_DEBUG);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_DEBUG);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                    return null;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_DEBUG);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        try {
            final boolean hasZF = hasZoneFallback();
            // When zone fallback is available the primary read must not throw, so the
            // fallback loop below gets a chance to run.
            boolean throwEx = hasZF ? false : throwExc;
            EVCacheItemMetaData data = getEVCacheItemMetaData(client, evcKey, throwEx, hasZF, isOriginalKeyHashed);
            if (data == null && hasZF) {
                final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                if (fbClients != null && !fbClients.isEmpty()) {
                    for (int i = 0; i < fbClients.size(); i++) {
                        final EVCacheClient fbClient = fbClients.get(i);
                        // Last fallback: restore the caller's original throw preference.
                        if(i >= fbClients.size() - 1) throwEx = throwExc;
                        if (event != null) {
                            try {
                                if (shouldThrottle(event)) {
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                                    return null;
                                }
                            } catch(EVCacheException ex) {
                                if(throwExc) throw ex;
                                status = EVCacheMetricsFactory.THROTTLED;
                                return null;
                            }
                        }
                        tries++;
                        data = getEVCacheItemMetaData(fbClient, evcKey, throwEx, (i < fbClients.size() - 1) ? true : false, isOriginalKeyHashed);
                        if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
                        if (data != null) {
                            client = fbClient;
                            break;
                        }
                    }
                }
            }
            if (data != null) {
                if (event != null) event.setAttribute("status", "MDHIT");
            } else {
                cacheOperation = EVCacheMetricsFactory.NO;
                if (event != null) event.setAttribute("status", "MDMISS");
                if (log.isInfoEnabled() && shouldLog()) log.info("META_DEBUG : APP " + _appName + " ; cache miss for key : " + evcKey);
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_DEBUG : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
            if (event != null) endEvent(event);
            return data;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting with meta data for APP " + _appName + ", key = " + evcKey
                    + ".\nYou can set the following property to increase the timeout " + _appName
                    + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting with metadata for APP " + _appName + ", key = " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.META_DEBUG.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_DEBUG : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }

    /** Public meta-get entry point; the key is treated as an original (un-hashed) key. */
    public <T> EVCacheItem<T> metaGet(String key, Transcoder<T> tc) throws EVCacheException {
        return this.metaGetInternal(key, tc, false);
    }

    /**
     * Meta-get: fetches the value together with its item metadata, using the same
     * throttle / zone-fallback / event / timer structure as metaDebugInternal.
     */
    protected <T> EVCacheItem<T> metaGetInternal(String key, Transcoder<T> tc, boolean isOriginalKeyHashed) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException("Key cannot be null");
        final EVCacheKey evcKey = getEVCacheKey(key);
        final boolean throwExc = doThrowException();
        EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.META_GET);
            if (throwExc) throw new EVCacheException("Could not find a client to get the data APP " + _appName);
            return null; // Fast failure
        }
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.META_GET);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_GET);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                    return null;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_GET);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        try {
            final boolean hasZF = hasZoneFallback();
            boolean throwEx = hasZF ? false : throwExc;
            EVCacheItem<T> data = getEVCacheItem(client, evcKey, tc, throwEx, hasZF, isOriginalKeyHashed, true);
            if (data == null && hasZF) {
                final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                if (fbClients != null && !fbClients.isEmpty()) {
                    for (int i = 0; i < fbClients.size(); i++) {
                        final EVCacheClient fbClient = fbClients.get(i);
                        if(i >= fbClients.size() - 1) throwEx = throwExc;
                        if (event != null) {
                            try {
                                if (shouldThrottle(event)) {
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                                    return null;
                                }
                            } catch(EVCacheException ex) {
                                if(throwExc) throw ex;
                                status = EVCacheMetricsFactory.THROTTLED;
                                return null;
                            }
                        }
                        tries++;
                        data = getEVCacheItem(fbClient, evcKey, tc, throwEx, (i < fbClients.size() - 1) ? true : false, isOriginalKeyHashed, true);
                        if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ?
"], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
                        if (data != null) {
                            client = fbClient;
                            break;
                        }
                    }
                }
            }
            if (data != null) {
                if (event != null) event.setAttribute("status", "MGHIT");
            } else {
                cacheOperation = EVCacheMetricsFactory.NO;
                if (event != null) event.setAttribute("status", "MGMISS");
                if (log.isInfoEnabled() && shouldLog()) log.info("META_GET : APP " + _appName + " ; cache miss for key : " + evcKey);
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
            if (event != null) endEvent(event);
            return data;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting with meta data for APP " + _appName + ", key = " + evcKey
                    + ".\nYou can set the following property to increase the timeout " + _appName
                    + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting with meta data for APP " + _appName + ", key = " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.META_GET.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_GET : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }

    /**
     * Translates a consistency Policy into the number of replicas that must agree,
     * given {@code count} total replicas: NONE -> 0, ONE -> 1, QUORUM -> majority,
     * ALL_MINUS_1 -> count-1 (with small-count special cases), default -> count.
     */
    private int policyToCount(Policy policy, int count) {
        if (policy == null) return 0;
        switch (policy) {
            case NONE:
                return 0;
            case ONE:
                return 1;
            case QUORUM:
                if (count == 0) return 0;
                else if (count <= 2) return count;
                else return (count / 2) + 1;
            case ALL_MINUS_1:
                if (count == 0) return 0;
                else if (count <= 2) return 1;
                else return count - 1;
            default:
                return count;
        }
    }

    /**
     * Consistent read: issues the get against every writable copy and returns a
     * value only when at least policyToCount(...) copies agree; copies holding a
     * minority value are deleted (see inline TODO about preserving TTL).
     */
    public <T> T get(String key, Transcoder<T> tc, Policy policy) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException();
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
            if (throwExc) throw new EVCacheException("Could not find a client to asynchronously get the data");
            return null; // Fast failure
        }
        final int expectedSuccessCount = policyToCount(policy, clients.length);
        // A single-agreement policy degenerates to a plain get.
        if(expectedSuccessCount <= 1) return get(key, tc);
        final long startTime = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        try {
            final List<Future<T>> futureList = new ArrayList<Future<T>>(clients.length);
            final long endTime = startTime + _pool.getReadTimeout().get().intValue();
            for (EVCacheClient client : clients) {
                final Future<T> future = getGetFuture(client, key, tc, throwExc);
                futureList.add(future);
                if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : APP " + _appName + ", Future " + future + " for key : " + key + " with policy : " + policy + " for client : " + client);
            }
            // Groups the clients by the value each one returned.
            final Map<T, List<EVCacheClient>> evcacheClientMap = new HashMap<T, List<EVCacheClient>>();
            //final Map<T, Integer> tMap = new HashMap<T,Integer>();
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Total Requests " + clients.length + "; Expected Success Count : " + expectedSuccessCount);
            for(Future<T> future : futureList) {
                try {
                    if(future instanceof EVCacheOperationFuture) {
                        EVCacheOperationFuture<T> evcacheOperationFuture = (EVCacheOperationFuture<T>)future;
                        // Remaining time until the shared deadline; clamp to a 20ms minimum block.
                        long duration = endTime - System.currentTimeMillis();
                        if(duration < 20) duration = 20;
                        if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : block duration : " + duration);
                        final T t = evcacheOperationFuture.get(duration, TimeUnit.MILLISECONDS, throwExc, false);
                        if (log.isTraceEnabled() && shouldLog()) log.trace("GET : CONSISTENT : value : " + t);
                        if(t != null) {
                            final List<EVCacheClient> cList = evcacheClientMap.computeIfAbsent(t, k -> new ArrayList<EVCacheClient>(clients.length));
                            cList.add(evcacheOperationFuture.getEVCacheClient());
                            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Added Client to ArrayList " + cList);
                        }
                    }
                } catch (Exception e) {
                    log.error("Exception",e);
                }
            }
            T retVal = null;
            /* TODO : use metaget to get TTL and set it. For now we will delete the inconsistent value */
            for(Entry<T, List<EVCacheClient>> entry : evcacheClientMap.entrySet()) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Existing Count for Value : " + entry.getValue().size() + "; expectedSuccessCount : " + expectedSuccessCount);
                if(entry.getValue().size() >= expectedSuccessCount) {
                    retVal = entry.getKey();
                } else {
                    for(EVCacheClient client : entry.getValue()) {
                        if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Delete in-consistent vale from : " + client);
                        client.delete(key);
                    }
                }
            }
            if(retVal != null) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : policy : " + policy + " was met. Will return the value. Total Duration : " + (System.currentTimeMillis() - startTime) + " milli Seconds.");
                return retVal;
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : policy : " + policy + " was NOT met. Will return NULL. Total Duration : " + (System.currentTimeMillis() - startTime) + " milli Seconds.");
            return null;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting data for APP " + _appName + ", key = " + key, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- startTime;
            getTimer(Call.GET_ALL.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }

    /** Rx (Single) get using the app-level default transcoder. */
    public <T> Single<T> get(String key, Scheduler scheduler) {
        return this.get(key, (Transcoder<T>) _transcoder, scheduler);
    }

    /**
     * Rx (Single) get with zone fallback: setup failures (null client, throttling)
     * surface as Single.error; a primary miss is retried across the fallback groups.
     */
    public <T> Single<T> get(String key, Transcoder<T> tc, Scheduler scheduler) {
        if (null == key) return Single.error(new IllegalArgumentException("Key cannot be null"));
        final boolean throwExc = doThrowException();
        final EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
            return Single.error(new EVCacheException("Could not find a client to get the data APP " + _appName));
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            if (shouldThrottle(event)) {
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET);
                return Single.error(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        final boolean hasZF = hasZoneFallback();
        final boolean throwEx = hasZF ?
false : throwExc;
        return getData(client, evcKey, tc, throwEx, hasZF, scheduler).flatMap(data -> {
            if (data == null && hasZF) {
                final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                if (fbClients != null && !fbClients.isEmpty()) {
                    // Try each fallback group in order and take the first non-null hit.
                    return Observable.concat(Observable.from(fbClients).map(
                            fbClient -> getData(fbClients.indexOf(fbClient), fbClients.size(), fbClient, evcKey, tc, throwEx, throwExc, false, scheduler)
                                    //TODO : for the last one make sure to pass throwExc
                                    //.doOnSuccess(fbData -> increment("RETRY_" + ((fbData == null) ? "MISS" : "HIT")))
                                    .toObservable()))
                            .firstOrDefault(null, fbData -> (fbData != null)).toSingle();
                }
            }
            return Single.just(data);
        }).map(data -> {
            //increment("GetCall");
            if (data != null) {
                //increment("GetHit");
                if (event != null) event.setAttribute("status", "GHIT");
            } else {
                //increment("GetMiss");
                if (event != null) event.setAttribute("status", "GMISS");
                if (log.isInfoEnabled() && shouldLog()) log.info("GET : APP " + _appName + " ; cache miss for key : " + evcKey);
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client .getServerGroup());
            if (event != null) endEvent(event);
            return data;
        }).onErrorReturn(ex -> {
            if (ex instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
                if (event != null) {
                    event.setStatus(EVCacheMetricsFactory.TIMEOUT);
                    eventError(event, ex);
                }
                if (!throwExc) return null;
                throw sneakyThrow(new EVCacheException("CheckedOperationTimeoutException getting data for APP " + _appName + ", key = " + evcKey
                        + ".\nYou can set the following property to increase the timeout " + _appName
                        + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex));
            } else {
                if (event != null) {
                    event.setStatus(EVCacheMetricsFactory.ERROR);
                    eventError(event, ex);
                }
                if (!throwExc) return null;
                throw sneakyThrow(new EVCacheException("Exception getting data for APP " + _appName + ", key = " + evcKey, ex));
            }
        }).doAfterTerminate(() -> {
            // NOTE(review): this timer is tagged Call.GET_AND_TOUCH although the call is a
            // GET — looks copy-pasted; confirm before relying on the metric name.
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, null, EVCacheMetricsFactory.SUCCESS, 1, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", Took " + duration + " milliSec.");
        });
    }

    /**
     * Synchronous single-client read. Hashed keys are stored wrapped in an
     * EVCacheValue; the canonical key embedded in the value is compared to detect
     * hash collisions before decoding. Failures degrade to null when the caller
     * disabled throwing or a zone fallback is still available.
     */
    private <T> T getData(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF) throws Exception {
        if (client == null) return null;
        // Transcoder precedence: explicit argument > app default > client default.
        final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ?
                (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
        try {
            String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
            String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
            if(hashKey != null) {
                final Object obj = client.get(hashKey, evcacheValueTranscoder, throwException, hasZF);
                if(obj != null && obj instanceof EVCacheValue) {
                    final EVCacheValue val = (EVCacheValue)obj;
                    if(!val.getKey().equals(canonicalKey)) {
                        // Two different keys hashed to the same slot; treat as a miss.
                        incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.GET.name(), EVCacheMetricsFactory.READ);
                        return null;
                    }
                    final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                    return transcoder.decode(cd);
                } else {
                    return null;
                }
            } else {
                return client.get(canonicalKey, transcoder, throwException, hasZF);
            }
        } catch (EVCacheConnectException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheReadQueueException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        }
    }

    /** Async read variant that first enforces throttling against the supplied event. */
    private <T> CompletableFuture<T> getAsyncData(EVCacheClient client, EVCacheEvent event, EVCacheKey key, Transcoder<T> tc) {
        if (event != null) {
            if (shouldThrottle(event)) {
                CompletableFuture<T> completableFuture = new CompletableFuture<>();
                completableFuture.completeExceptionally(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
                return completableFuture;
            }
        }
        return getAsyncData(client, key, tc);
    }

    /**
     * Core async read: hashed keys go through the EVCacheValue wrapper (collision
     * check + decode); plain keys are fetched directly with the chosen transcoder.
     */
    private <T> CompletableFuture<T> getAsyncData(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc) {
        final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
        String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
        String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
        if (hashKey != null) {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Fetching data with hashKey {} ", hashKey);
            }
            return client.getAsync(hashKey, evcacheValueTranscoder)
                    .thenApply(val -> getData(transcoder, canonicalKey, val))
                    .exceptionally(ex -> handleClientException(hashKey, ex));
        } else {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Fetching data with canonicalKey {} ", canonicalKey);
            }
            return client.getAsync(canonicalKey, transcoder)
                    .exceptionally(ex -> handleClientException(canonicalKey, ex));
        }
    }

    /** Logs and rethrows (sneakily, unchecked) any async client failure. */
    private <T> T handleClientException(String evcKey, Throwable ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey + ":" + ex);
        throw sneakyThrow(ex);
    }

    /**
     * Unwraps an EVCacheValue fetched under a hashed key: verifies the embedded
     * canonical key (hash-collision guard) and decodes the payload; anything that
     * is not an EVCacheValue is treated as a miss.
     */
    private <T> T getData(Transcoder<T> transcoder, String canonicalKey, Object obj) {
        if (obj instanceof EVCacheValue) {
            final EVCacheValue val = (EVCacheValue) obj;
            if (!val.getKey().equals(canonicalKey)) {
                incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.GET.name(), EVCacheMetricsFactory.READ);
                return null;
            }
            final CachedData cd = new
CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
        return transcoder.decode(cd);
    } else {
        return null;
    }
}

/**
 * Fetches item metadata ("meta debug") for the given key from the client.
 * Uses the raw key when the caller passed it pre-hashed, otherwise derives the
 * per-client key. Returns null when the client is null, or on failure when the
 * caller does not want exceptions (throwException false) or a zone fallback
 * exists (hasZF) — otherwise the original exception is rethrown.
 */
protected EVCacheItemMetaData getEVCacheItemMetaData(EVCacheClient client, EVCacheKey evcKey, boolean throwException, boolean hasZF, boolean isOriginalKeyHashed) throws Exception {
    if (client == null) return null;
    try {
        return client.metaDebug(isOriginalKeyHashed ? evcKey.getKey() : evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()));
    } catch (EVCacheConnectException ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    } catch (EVCacheReadQueueException ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    } catch (EVCacheException ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    } catch (Exception ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting with metadata for APP " + _appName + ", key : " + evcKey, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    }
}

/**
 * Fetches an item (value + metadata) via meta-get. For hashed keys the stored
 * EVCacheValue envelope is unwrapped and decoded when desearilizeEVCacheValue
 * is true (NOTE(review): parameter name is a typo for "deserialize"; left as-is
 * since renaming would touch the signature); when false, the raw CachedData is
 * returned for the caller to decode. Exception handling mirrors
 * getEVCacheItemMetaData: swallow and return null unless throwException && !hasZF.
 */
protected <T> EVCacheItem<T> getEVCacheItem(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF, boolean isOriginalKeyHashed, boolean desearilizeEVCacheValue) throws Exception {
    if (client == null) return null;
    // Transcoder precedence: explicit argument > app-level _transcoder > client default.
    final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
    try {
        String hashKey = isOriginalKeyHashed ? evcKey.getKey() : evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
        String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
        if (hashKey != null) {
            if(desearilizeEVCacheValue) {
                final EVCacheItem<Object> obj = client.metaGet(hashKey, evcacheValueTranscoder, throwException, hasZF);
                if (null == obj) return null;
                if (obj.getData() instanceof EVCacheValue) {
                    final EVCacheValue val = (EVCacheValue) obj.getData();
                    // NOTE(review): this null check is unreachable — instanceof above
                    // guarantees a non-null value; left in place (doc-only change).
                    if (null == val) {
                        return null;
                    }
                    // compare the key embedded in the value to the original key only if the original key is not passed hashed
                    if (!isOriginalKeyHashed && !(val.getKey().equals(canonicalKey))) {
                        incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.META_GET.name(), EVCacheMetricsFactory.META_GET_OPERATION);
                        return null;
                    }
                    final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                    T t = transcoder.decode(cd);
                    obj.setData(t);
                    obj.setFlag(val.getFlags());
                    return (EVCacheItem<T>) obj;
                } else {
                    return null;
                }
            } else {
                // Caller wants the raw bytes; ChunkTranscoder returns CachedData untouched.
                final EVCacheItem<CachedData> obj = client.metaGet(hashKey, new ChunkTranscoder(), throwException, hasZF);
                if (null == obj) return null;
                return (EVCacheItem<T>) obj;
            }
        } else {
            return client.metaGet(canonicalKey, transcoder, throwException, hasZF);
        }
    } catch (EVCacheConnectException ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    } catch (EVCacheReadQueueException ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    } catch (EVCacheException ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    } catch (Exception ex) {
        if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting with meta data for APP " + _appName + ", key : " + evcKey, ex);
        if (!throwException || hasZF) return null;
        throw ex;
    }
}

// Fallback-retry wrapper: when this client is the last in the fallback list
// (index >= size - 1), propagate the caller's real throw preference (throwExc).
private <T> Single<T> getData(int index, int size, EVCacheClient client, EVCacheKey canonicalKey, Transcoder<T> tc, boolean throwEx, boolean throwExc, boolean hasZF, Scheduler scheduler) {
    if(index >= size -1) throwEx = throwExc;
    return getData(client, canonicalKey, tc, throwEx, hasZF, scheduler);
}

/**
 * Rx (Single) read of a canonical key. Hashed keys are explicitly not
 * supported on this path and yield an error Single.
 */
private <T> Single<T> getData(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF, Scheduler scheduler) {
    if (client == null) return Single.error(new IllegalArgumentException("Client cannot be null"));
    if(evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
        return Single.error(new IllegalArgumentException("Not supported"));
    } else {
        // Transcoder precedence: explicit argument > app-level _transcoder > client default.
        final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ?
(Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
        // Error mapping: swallow (return null) unless the caller asked for
        // exceptions and there is no zone fallback; otherwise rethrow unchecked.
        return client.get(evcKey.getCanonicalKey(client.isDuetClient()), transcoder, throwException, hasZF, scheduler).onErrorReturn(ex -> {
            if (ex instanceof EVCacheReadQueueException) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
                if (!throwException || hasZF) return null;
                throw sneakyThrow(ex);
            } else if (ex instanceof EVCacheException) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
                if (!throwException || hasZF) return null;
                throw sneakyThrow(ex);
            } else {
                if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey, ex);
                if (!throwException || hasZF) return null;
                throw sneakyThrow(ex);
            }
        });
    }
}

// 30 days in seconds — the memcached threshold above which a TTL is treated
// as an absolute epoch timestamp rather than a relative interval.
private final int MAX_IN_SEC = 2592000;

/**
 * Validates a TTL: must be >= 0, must be in seconds (a value larger than the
 * current epoch-millis is assumed to be milliseconds by mistake), and when
 * larger than 30 days it must be a future epoch-seconds timestamp. On any
 * violation the INVALID_TTL fast-fail counter is bumped and the
 * IllegalArgumentException is rethrown.
 * NOTE(review): the first message contains the typo "great than" — runtime
 * text, deliberately left untouched in this doc-only change.
 */
private void checkTTL(int timeToLive, Call call) throws IllegalArgumentException {
    try {
        if(timeToLive < 0) throw new IllegalArgumentException ("Time to Live ( " + timeToLive + ") must be great than or equal to 0.");
        final long currentTimeInMillis = System.currentTimeMillis();
        if(timeToLive > currentTimeInMillis) throw new IllegalArgumentException ("Time to Live ( " + timeToLive + ") must be in seconds.");
        if(timeToLive > MAX_IN_SEC && timeToLive < currentTimeInMillis/1000) throw new IllegalArgumentException ("If providing Time to Live ( " + timeToLive + ") in seconds as epoc value, it should be greater than current time " + currentTimeInMillis/1000);
    } catch (IllegalArgumentException iae) {
        incrementFastFail(EVCacheMetricsFactory.INVALID_TTL, call);
        throw iae;
    }
}

// Convenience overload: uses the app-level transcoder.
public <T> T getAndTouch(String key, int timeToLive) throws EVCacheException {
    return this.getAndTouch(key, timeToLive, (Transcoder<T>) _transcoder);
}

// Convenience Rx overload: uses the app-level transcoder.
public <T> Single<T> getAndTouch(String key, int timeToLive, Scheduler scheduler) {
    return this.getAndTouch(key, timeToLive, (Transcoder<T>) _transcoder, scheduler);
}

/**
 * Rx get-and-touch: reads the key (with zone-fallback retries across other
 * server groups) and, on a hit, touches all copies to the new TTL. Not
 * supported when key hashing is enabled for the app. Errors map to null or an
 * EVCacheException depending on the throw-exception setting.
 */
public <T> Single<T> getAndTouch(String key, int timeToLive, Transcoder<T> tc, Scheduler scheduler) {
    if (null == key) return Single.error(new IllegalArgumentException("Key cannot be null"));
    checkTTL(timeToLive, Call.GET_AND_TOUCH);
    if(hashKey.get()) {
        return Single.error(new IllegalArgumentException("Not supported"));
    }
    final boolean throwExc = doThrowException();
    final EVCacheClient client = _pool.getEVCacheClientForRead();
    if (client == null) {
        incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET_AND_TOUCH);
        return Single.error(new EVCacheException("Could not find a client to get and touch the data for APP " + _appName));
    }
    final EVCacheKey evcKey = getEVCacheKey(key);
    final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET_AND_TOUCH);
    if (event != null) {
        event.setEVCacheKeys(Arrays.asList(evcKey));
        if (shouldThrottle(event)) {
            incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
            return Single.error(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
        }
        event.setTTL(timeToLive);
        startEvent(event);
    }
    final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
    final boolean hasZF = hasZoneFallback();
    final boolean throwEx = hasZF ? false : throwExc;
    //anyway we have to touch all copies so let's just reuse getData instead of getAndTouch
    return getData(client, evcKey, tc, throwEx, hasZF, scheduler).flatMap(data -> {
        if (data == null && hasZF) {
            // Zone fallback: try every other server group in order; first hit wins.
            final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
            if (fbClients != null && !fbClients.isEmpty()) {
                return Observable.concat(Observable.from(fbClients).map(
                        //TODO : for the last one make sure to pass throwExc
                        fbClient -> getData(fbClients.indexOf(fbClient), fbClients.size(), fbClient, evcKey, tc, throwEx, throwExc, false, scheduler)
                                .doOnSuccess(fbData -> {
                                    //increment("RETRY_" + ((fbData == null) ? "MISS" : "HIT"));
                                })
                                .toObservable()))
                        .firstOrDefault(null, fbData -> (fbData != null)).toSingle();
            }
        }
        return Single.just(data);
    }).map(data -> {
        //increment("GetCall");
        if (data != null) {
            //increment("GetHit");
            if (event != null) event.setAttribute("status", "THIT");
            // touch all copies
            try {
                touchData(evcKey, timeToLive);
            } catch (Exception e) {
                throw sneakyThrow(new EVCacheException("Exception performing touch for APP " + _appName + ", key = " + evcKey, e));
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client .getServerGroup());
        } else {
            //increment("GetMiss");
            if (event != null) event.setAttribute("status", "TMISS");
            if (log.isInfoEnabled() && shouldLog()) log.info("GET_AND_TOUCH : APP " + _appName + " ; cache miss for key : " + evcKey);
        }
        if (event != null) endEvent(event);
        return data;
    }).onErrorReturn(ex -> {
        if (ex instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
            if (event != null) {
                event.setStatus(EVCacheMetricsFactory.TIMEOUT);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw sneakyThrow(new EVCacheException("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key = " + evcKey + ".\nYou can set the following property to increase the timeout " + _appName + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex));
        } else {
            if (event != null) {
                event.setStatus(EVCacheMetricsFactory.ERROR);
                eventError(event, ex);
            }
            // NOTE(review): eventError is invoked a second time here for the same
            // event/exception — looks like unintended duplication; confirm before removing.
            if (event != null) eventError(event, ex);
            if (!throwExc) return null;
            throw sneakyThrow(new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex));
        }
    }).doAfterTerminate(() -> {
        final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
        getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, null, EVCacheMetricsFactory.SUCCESS, 1, maxReadDuration.get().intValue(),client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", Took " + duration+ " milliSec.");
    });
}

/**
 * Blocking get-and-touch. Consults the in-memory cache first when enabled
 * (touching the distributed copies on a local hit); honors the ignore-touch
 * flag by degrading to a plain get; otherwise delegates to doGetAndTouch.
 */
@Override
public <T> T getAndTouch(String key, int timeToLive, Transcoder<T> tc) throws EVCacheException {
    if (null == key) throw new IllegalArgumentException("Key cannot be null");
    checkTTL(timeToLive, Call.GET_AND_TOUCH);
    final EVCacheKey evcKey = getEVCacheKey(key);
    if (_useInMemoryCache.get()) {
        final boolean throwExc = doThrowException();
        T value = null;
        try {
            final Transcoder<T>
transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
            value = (T) getInMemoryCache(transcoder).get(evcKey);
        } catch (ExecutionException e) {
            // DataNotFoundException is a plain miss; a wrapped EVCacheException is
            // rethrown as-is; anything else is wrapped. All swallowed when the
            // caller opted out of exceptions.
            if(throwExc) {
                if(e.getCause() instanceof DataNotFoundException) {
                    return null;
                }
                if(e.getCause() instanceof EVCacheException) {
                    if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e);
                    throw (EVCacheException)e.getCause();
                }
                throw new EVCacheException("ExecutionException", e);
            }
        }
        if (value != null) {
            // Local hit still needs the distributed copies' TTL refreshed.
            try {
                touchData(evcKey, timeToLive);
            } catch (Exception e) {
                if (throwExc) throw new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, e);
            }
            return value;
        }
    }
    if(ignoreTouch.get()) {
        return doGet(evcKey, tc);
    } else {
        return doGetAndTouch(evcKey, timeToLive, tc);
    }
}

/**
 * Core blocking get-and-touch: read from the primary read client, retry across
 * zone-fallback clients on a miss (re-checking throttling before each retry),
 * and on a hit touch all copies to the new TTL. Records a timer with
 * hit/miss, status and try-count tags in the finally block.
 */
<T> T doGetAndTouch(EVCacheKey evcKey, int timeToLive, Transcoder<T> tc) throws EVCacheException {
    final boolean throwExc = doThrowException();
    EVCacheClient client = _pool.getEVCacheClientForRead();
    if (client == null) {
        incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET_AND_TOUCH);
        if (throwExc) throw new EVCacheException("Could not find a client to get and touch the data for App " + _appName);
        return null; // Fast failure
    }
    final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET_AND_TOUCH);
    if (event != null) {
        event.setEVCacheKeys(Arrays.asList(evcKey));
        try {
            if (shouldThrottle(event)) {
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
                if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                return null;
            }
        } catch(EVCacheException ex) {
            if(throwExc) throw ex;
            incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
            return null;
        }
        event.setTTL(timeToLive);
        startEvent(event);
    }
    final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
    String cacheOperation = EVCacheMetricsFactory.YES;
    int tries = 1;
    String status = EVCacheMetricsFactory.SUCCESS;
    try {
        final boolean hasZF = hasZoneFallback();
        // Suppress exceptions while a fallback remains; the last try restores
        // the caller's preference below.
        boolean throwEx = hasZF ? false : throwExc;
        T data = getData(client, evcKey, tc, throwEx, hasZF);
        if (data == null && hasZF) {
            final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
            for (int i = 0; i < fbClients.size(); i++) {
                final EVCacheClient fbClient = fbClients.get(i);
                if(i >= fbClients.size() - 1) throwEx = throwExc;
                if (event != null) {
                    try {
                        if (shouldThrottle(event)) {
                            status = EVCacheMetricsFactory.THROTTLED;
                            if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                            return null;
                        }
                    } catch(EVCacheException ex) {
                        if(throwExc) throw ex;
                        status = EVCacheMetricsFactory.THROTTLED;
                        return null;
                    }
                }
                tries++;
                // hasZF arg: true while more fallbacks remain, false on the last.
                // NOTE(review): "(cond) ? true : false" is redundant — just the
                // condition would do; left unchanged in this doc-only pass.
                data = getData(fbClient, evcKey, tc, throwEx, (i < fbClients.size() - 1) ? true : false);
                if (log.isDebugEnabled() && shouldLog()) log.debug("GetAndTouch Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
                if (data != null) {
                    // Remember which client answered so the touch/log/timer use it.
                    client = fbClient;
                    break;
                }
            }
        }
        if (data != null) {
            if (event != null) event.setAttribute("status", "THIT");
            // touch all copies
            touchData(evcKey, timeToLive);
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
        } else {
            cacheOperation = EVCacheMetricsFactory.NO;
            if (log.isInfoEnabled() && shouldLog()) log.info("GET_AND_TOUCH : APP " + _appName + " ; cache miss for key : " + evcKey);
            if (event != null) event.setAttribute("status", "TMISS");
        }
        if (event != null) endEvent(event);
        return data;
    } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
        status = EVCacheMetricsFactory.TIMEOUT;
        if (event != null) {
            event.setStatus(status);
            eventError(event, ex);
        }
        if (log.isDebugEnabled() && shouldLog()) log.debug("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key : " + evcKey, ex);
        if (!throwExc) return null;
        throw new EVCacheException("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key = " + evcKey + ".\nYou can set the following property to increase the timeout " + _appName+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
    } catch (Exception ex) {
        status = EVCacheMetricsFactory.ERROR;
        if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex);
        if (event != null) {
            event.setStatus(status);
            eventError(event, ex);
        }
        if (!throwExc) return null;
        throw new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex);
    } finally {
        final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
        getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled() && shouldLog()) log.debug("Took " + duration + " milliSec to get&Touch the value for APP " + _appName + ", key " + evcKey);
    }
}

/**
 * Touches the key on all writable copies and adapts the latch's futures to
 * the legacy EVCacheFuture[] return type.
 */
@Override
public Future<Boolean>[] touch(String key, int timeToLive) throws EVCacheException {
    checkTTL(timeToLive, Call.TOUCH);
    final EVCacheLatch
latch = this.touch(key, timeToLive, null);
    if (latch == null) return new EVCacheFuture[0];
    final List<Future<Boolean>> futures = latch.getAllFutures();
    if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
    // Wrap each latch future as an EVCacheFuture, preserving server-group
    // info when the underlying future exposes it.
    final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
    for (int i = 0; i < futures.size(); i++) {
        final Future<Boolean> future = futures.get(i);
        if (future instanceof EVCacheFuture) {
            eFutures[i] = (EVCacheFuture) future;
        } else if (future instanceof EVCacheOperationFuture) {
            final EVCacheOperationFuture<Boolean> evfuture = (EVCacheOperationFuture<Boolean>)future;
            eFutures[i] = new EVCacheFuture(future, key, _appName, evfuture.getServerGroup(), evfuture.getEVCacheClient());
        } else {
            eFutures[i] = new EVCacheFuture(future, key, _appName, null);
        }
    }
    return eFutures;
}

/**
 * Touches the key's TTL on every writable client, returning a latch that
 * completes per the given policy (defaults to ALL_MINUS_1). Records TTL
 * distribution and call-duration metrics in the finally block.
 * NOTE(review): the null-key IllegalArgumentException carries no message,
 * unlike the sibling methods — confirm before aligning.
 */
public <T> EVCacheLatch touch(String key, int timeToLive, Policy policy) throws EVCacheException {
    if (null == key) throw new IllegalArgumentException();
    checkTTL(timeToLive, Call.TOUCH);
    final boolean throwExc = doThrowException();
    final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
    if (clients.length == 0) {
        incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.TOUCH);
        if (throwExc) throw new EVCacheException("Could not find a client to set the data");
        return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
    }
    final EVCacheKey evcKey = getEVCacheKey(key);
    final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.TOUCH);
    if (event != null) {
        event.setEVCacheKeys(Arrays.asList(evcKey));
        try {
            if (shouldThrottle(event)) {
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.TOUCH);
                if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
            }
        } catch(EVCacheException ex) {
            if(throwExc) throw ex;
            incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.TOUCH);
            return null;
        }
        startEvent(event);
    }
    String status = EVCacheMetricsFactory.SUCCESS;
    final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
    try {
        // Latch counts only read-write clients (total minus write-only ones).
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
        touchData(evcKey, timeToLive, clients, latch);
        if (event != null) {
            event.setTTL(timeToLive);
            if(_eventsUsingLatchFP.get()) {
                // Let the latch finish the event once the operations complete.
                latch.setEVCacheEvent(event);
                latch.scheduledFutureValidation();
            } else {
                endEvent(event);
            }
        }
        return latch;
    } catch (Exception ex) {
        status = EVCacheMetricsFactory.ERROR;
        if (log.isDebugEnabled() && shouldLog()) log.debug("Exception touching the data for APP " + _appName + ", key : " + evcKey, ex);
        if (event != null) {
            event.setStatus(status);
            eventError(event, ex);
        }
        if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
        throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
    } finally {
        final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
        getTTLDistributionSummary(Call.TOUCH.name(), EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.TTL).record(timeToLive);
        getTimer(Call.TOUCH.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled() && shouldLog()) log.debug("TOUCH : APP " + _appName + " for key : " + evcKey + " with timeToLive : " + timeToLive);
    }
}

// Touch across all writable clients (no latch).
private void touchData(EVCacheKey evcKey, int timeToLive) throws Exception {
    final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
    touchData(evcKey, timeToLive, clients);
}

// Touch across the given clients (no latch).
private void touchData(EVCacheKey evcKey, int timeToLive, EVCacheClient[] clients) throws Exception {
    touchData(evcKey, timeToLive, clients, null);
}

// Touch across the given clients, deriving the per-client key (hashed or
// canonical) and registering each operation with the optional latch.
private void touchData(EVCacheKey evcKey, int timeToLive, EVCacheClient[] clients, EVCacheLatch latch ) throws Exception {
    checkTTL(timeToLive, Call.TOUCH);
    for (EVCacheClient client : clients) {
        client.touch(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), timeToLive, latch);
    }
}

// Convenience overload: uses the app-level transcoder.
public <T> Future<T> getAsynchronous(String key) throws EVCacheException {
    return this.getAsynchronous(key, (Transcoder<T>) _transcoder);
};

/**
 * Asynchronous get returning a Future. Fast-fails (null or exception,
 * per the throw-exception setting) when no read client is available.
 */
@Override
public <T> Future<T> getAsynchronous(final String key, final Transcoder<T> tc) throws EVCacheException {
    if (null == key) throw new IllegalArgumentException("Key is null.");
    final boolean throwExc = doThrowException();
    final EVCacheClient client = _pool.getEVCacheClientForRead();
    if (client == null) {
        incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.ASYNC_GET);
        if (throwExc) throw new EVCacheException("Could not find a client to asynchronously get the data");
        return null; // Fast failure
    }
    return getGetFuture(client, key, tc, throwExc);
}

/**
 * Builds the Future for an async get. For hashed keys the returned Future is
 * an adapter that unwraps the EVCacheValue envelope (with hash-collision
 * detection) on get(); otherwise the client's future is returned directly.
 */
private <T> Future<T> getGetFuture(final EVCacheClient client, final String key, final Transcoder<T> tc, final boolean throwExc) throws EVCacheException {
    final EVCacheKey evcKey = getEVCacheKey(key);
    final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.ASYNC_GET);
    if (event != null) {
        event.setEVCacheKeys(Arrays.asList(evcKey));
        try {
            if (shouldThrottle(event)) {
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ASYNC_GET);
                if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                return null;
            }
        } catch(EVCacheException ex) {
            if(throwExc) throw ex;
            incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ASYNC_GET);
            return null;
        }
        startEvent(event);
    }
    String status = EVCacheMetricsFactory.SUCCESS;
    final Future<T> r;
    final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
    try {
        String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(),
client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
        String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
        if(hashKey != null) {
            final Future<Object> objFuture = client.asyncGet(hashKey, evcacheValueTranscoder, throwExc, false);
            // Adapter future: delegates lifecycle to objFuture and decodes the
            // EVCacheValue envelope (collision-checked) on get().
            r = new Future<T> () {
                @Override
                public boolean cancel(boolean mayInterruptIfRunning) {
                    return objFuture.cancel(mayInterruptIfRunning);
                }

                @Override
                public boolean isCancelled() {
                    return objFuture.isCancelled();
                }

                @Override
                public boolean isDone() {
                    return objFuture.isDone();
                }

                @Override
                public T get() throws InterruptedException, ExecutionException {
                    return getFromObj(objFuture.get());
                }

                // Unwrap + decode; null on miss, wrong type, or hash collision.
                private T getFromObj(Object obj) {
                    if(obj != null && obj instanceof EVCacheValue) {
                        final EVCacheValue val = (EVCacheValue)obj;
                        if(!val.getKey().equals(canonicalKey)) {
                            incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.ASYNC_GET.name(), EVCacheMetricsFactory.READ);
                            return null;
                        }
                        final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                        final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                        return transcoder.decode(cd);
                    } else {
                        return null;
                    }
                }

                @Override
                public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
                    return getFromObj(objFuture.get(timeout, unit));
                }
            };
        } else {
            final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
            r = client.asyncGet(canonicalKey, transcoder, throwExc, false);
        }
        if (event != null) endEvent(event);
    } catch (Exception ex) {
        status = EVCacheMetricsFactory.ERROR;
        if (log.isDebugEnabled() && shouldLog()) log.debug( "Exception while getting data for keys Asynchronously APP " + _appName + ", key : " + key, ex);
        if (event != null) {
            event.setStatus(status);
            eventError(event, ex);
        }
        if (!throwExc) return null;
        throw new EVCacheException("Exception getting data for APP " + _appName + ", key : " + key, ex);
    } finally {
        final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
        getTimer(Call.ASYNC_GET.name(), EVCacheMetricsFactory.READ, null, status, 1, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled() && shouldLog()) log.debug("Took " + duration + " milliSec to execute AsyncGet the value for APP " + _appName + ", key " + key);
    }
    return r;
}

// Throttle-guarded variant of the async bulk read: fails fast (unchecked,
// via sneakyThrow) when the event is throttled, then delegates.
private <T> CompletableFuture<Map<EVCacheKey, T>> getAsyncBulkData(EVCacheClient client, EVCacheEvent event, List<EVCacheKey> keys, Transcoder<T> tc) {
    if (event != null) {
        if (shouldThrottle(event)) {
            throw sneakyThrow(new EVCacheException("Request Throttled for app " + _appName + " & key " + keys));
        }
    }
    return getAsyncBulkData(client, keys, tc);
}

/**
 * Asynchronous bulk read. If ANY key in the batch is hashed, the whole batch
 * is fetched through the EVCacheValue envelope transcoder and decoded per-key
 * (with collision checks); otherwise a single transcoder is used directly.
 */
private <T> CompletableFuture<Map<EVCacheKey, T>> getAsyncBulkData(EVCacheClient client, List<EVCacheKey> evcacheKeys, Transcoder<T> tc) {
    KeyMapDto keyMapDto = buildKeyMap(client, evcacheKeys);
    final Map<String, EVCacheKey> keyMap = keyMapDto.getKeyMap();
    boolean hasHashedKey = keyMapDto.isKeyHashed();
    if (hasHashedKey) {
        if (log.isDebugEnabled() && shouldLog()) {
            log.debug("fetching bulk data with hashedKey {} ",evcacheKeys);
        }
        return client.getAsyncBulk(keyMap.keySet(), evcacheValueTranscoder)
                .thenApply(data -> buildHashedKeyValueResult(data, tc, client, keyMap))
                .exceptionally(t -> handleBulkException(t, evcacheKeys));
    } else {
        // Effectively-final copy of the transcoder choice for the lambda below.
        final Transcoder<T> tcCopy;
        if (tc == null && _transcoder != null) {
            tcCopy = (Transcoder<T>) _transcoder;
        } else {
            tcCopy = tc;
        }
        if (log.isDebugEnabled() && shouldLog()) {
            log.debug("fetching bulk data with non hashedKey {} ",keyMap.keySet());
        }
        return client.getAsyncBulk(keyMap.keySet(), tcCopy )
                .thenApply(data -> buildNonHashedKeyValueResult(data, keyMap))
                .exceptionally(t -> handleBulkException(t, evcacheKeys));
    }
}

// Logs (at debug) and rethrows a bulk-read failure unchecked; never returns.
private <T> Map<EVCacheKey, T> handleBulkException(Throwable t, Collection<EVCacheKey> evCacheKeys) {
    if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getBulk data for APP " + _appName + ", key : " + evCacheKeys, t);
    throw Sneaky.sneakyThrow(t);
}

/**
 * Maps each EVCacheKey to the string key to send to the server (hashed when
 * hashing applies, canonical otherwise) and reports whether any key in the
 * batch was hashed.
 */
private KeyMapDto buildKeyMap(EVCacheClient client, Collection<EVCacheKey> evcacheKeys) {
    boolean hasHashedKey = false;
    final Map<String, EVCacheKey> keyMap = new HashMap<String, EVCacheKey>(evcacheKeys.size() * 2);
    for (EVCacheKey evcKey : evcacheKeys) {
        String key = evcKey.getCanonicalKey(client.isDuetClient());
        String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
        if (hashKey != null) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + key + "], has been hashed [" + hashKey + "]");
            key = hashKey;
            hasHashedKey = true;
        }
        keyMap.put(key, evcKey);
    }
    return new KeyMapDto(keyMap, hasHashedKey);
}

// Re-keys a bulk result from server-side string keys back to EVCacheKeys
// (no envelope decoding needed on the non-hashed path).
private <T> Map<EVCacheKey, T> buildNonHashedKeyValueResult(Map<String, T> objMap, Map<String, EVCacheKey> keyMap) {
    final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
    for (Map.Entry<String, T> i : objMap.entrySet()) {
        final EVCacheKey evcKey = keyMap.get(i.getKey());
        if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
        retMap.put(evcKey, i.getValue());
    }
    return retMap;
}
    /**
     * Maps bulk results back to their EVCacheKeys when keys were hashed.
     *
     * <p>Each stored value is an {@link EVCacheValue} wrapper carrying the original canonical
     * key plus the caller's payload; the payload is re-decoded with the effective transcoder
     * and the embedded key is compared against the expected canonical key to detect hash
     * collisions. Collisions are dropped from the result and counted as a failure metric.
     * Non-wrapped values are passed through unchanged (cast).
     */
    private <T> Map<EVCacheKey, T> buildHashedKeyValueResult(Map<String, Object> objMap, Transcoder<T> tc,
            EVCacheClient client, Map<String, EVCacheKey> keyMap) {
        final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
        for (Map.Entry<String, Object> i : objMap.entrySet()) {
            final Object obj = i.getValue();
            if (obj instanceof EVCacheValue) {
                if (log.isDebugEnabled() && shouldLog())
                    log.debug("APP " + _appName + ", The value for key [" + i.getKey() + "] is EVCache Value");
                final EVCacheValue val = (EVCacheValue) obj;
                final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                final T tVal;
                if (tc == null) {
                    tVal = (T) client.getTranscoder().decode(cd);
                } else {
                    tVal = tc.decode(cd);
                }
                final EVCacheKey evcKey = keyMap.get(i.getKey());
                if (evcKey.getCanonicalKey(client.isDuetClient()).equals(val.getKey())) {
                    if (log.isDebugEnabled() && shouldLog())
                        log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
                    retMap.put(evcKey, tVal);
                } else {
                    // Embedded key does not match: two keys hashed to the same digest.
                    if (log.isDebugEnabled() && shouldLog())
                        log.debug("CACHE COLLISION : APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
                    incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.COMPLETABLE_FUTURE_GET_BULK.name(),
                            EVCacheMetricsFactory.READ);
                }
            } else {
                final EVCacheKey evcKey = keyMap.get(i.getKey());
                if (log.isDebugEnabled() && shouldLog())
                    log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
                retMap.put(evcKey, (T) obj);
            }
        }
        return retMap;
    }

    /**
     * Synchronous bulk fetch against a single client (used by the primary read and by
     * zone-fallback retries). Mirrors the async path: builds the wire-key map, picks the
     * hashed or plain decode path, and validates hashed values against their embedded
     * canonical key. Returns null on failure when the caller tolerates it
     * ({@code !throwException || hasZF}); otherwise rethrows.
     */
    private <T> Map<EVCacheKey, T> getBulkData(EVCacheClient client, Collection<EVCacheKey> evcacheKeys,
            Transcoder<T> tc, boolean throwException, boolean hasZF) throws Exception {
        try {
            boolean hasHashedKey = false;
            final Map<String, EVCacheKey> keyMap = new HashMap<>(evcacheKeys.size() * 2);
            for(EVCacheKey evcKey : evcacheKeys) {
                String key = evcKey.getCanonicalKey(client.isDuetClient());
                String hashKey = evcKey.getHashKey(client.isDuetClient(),
                        client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(),
                        client.getMaxHashLength(), client.getBaseEncoder());
                if(hashKey != null) {
                    if (log.isDebugEnabled() && shouldLog())
                        log.debug("APP " + _appName + ", key [" + key + "], has been hashed [" + hashKey + "]");
                    key = hashKey;
                    hasHashedKey = true;
                }
                keyMap.put(key, evcKey);
            }
            if(hasHashedKey) {
                // Hashed batch: fetch wrapped values and verify each embedded canonical key.
                final Map<String, Object> objMap = client.getBulk(keyMap.keySet(), evcacheValueTranscoder,
                        throwException, hasZF);
                final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
                for (Map.Entry<String, Object> i : objMap.entrySet()) {
                    final Object obj = i.getValue();
                    if(obj instanceof EVCacheValue) {
                        if (log.isDebugEnabled() && shouldLog())
                            log.debug("APP " + _appName + ", The value for key [" + i.getKey() + "] is EVCache Value");
                        final EVCacheValue val = (EVCacheValue)obj;
                        final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                        final T tVal;
                        if(tc == null) {
                            tVal = (T)client.getTranscoder().decode(cd);
                        } else {
                            tVal = tc.decode(cd);
                        }
                        final EVCacheKey evcKey = keyMap.get(i.getKey());
                        if(evcKey.getCanonicalKey(client.isDuetClient()).equals(val.getKey())) {
                            if (log.isDebugEnabled() && shouldLog())
                                log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
                            retMap.put(evcKey, tVal);
                        } else {
                            // Hash collision: skip the value, record the failure.
                            if (log.isDebugEnabled() && shouldLog())
                                log.debug("CACHE COLLISION : APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
                            incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.BULK.name(),
                                    EVCacheMetricsFactory.READ);
                        }
                    } else {
                        final EVCacheKey evcKey = keyMap.get(i.getKey());
                        if (log.isDebugEnabled() && shouldLog())
                            log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
                        retMap.put(evcKey, (T)obj);
                    }
                }
                return retMap;
            } else {
                if(tc == null && _transcoder != null) tc = (Transcoder<T>)_transcoder;
                final Map<String, T> objMap = client.getBulk(keyMap.keySet(), tc, throwException, hasZF);
                final Map<EVCacheKey, T> retMap = new HashMap<EVCacheKey, T>((int)(objMap.size()/0.75) + 1);
                for (Map.Entry<String, T> i : objMap.entrySet()) {
                    final EVCacheKey evcKey = keyMap.get(i.getKey());
                    if (log.isDebugEnabled() && shouldLog())
                        log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
                    retMap.put(evcKey, i.getValue());
                }
                return retMap;
            }
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog())
                log.debug("Exception while getBulk data for APP " + _appName + ", key : " + evcacheKeys, ex);
            // null signals "failed but recoverable" so the caller can attempt a zone fallback.
            if (!throwException || hasZF) return null;
            throw ex;
        }
    }

    /** Bulk read with app-default semantics (no touch). */
    public <T> Map<String, T> getBulk(Collection<String> keys, Transcoder<T> tc) throws EVCacheException {
        return getBulk(keys, tc, false, 0);
    }

    /** Bulk read that also refreshes the TTL of every hit. */
    public <T> Map<String, T> getBulkAndTouch(Collection<String> keys, Transcoder<T> tc, int timeToLive)
            throws EVCacheException {
        return getBulk(keys, tc, true, timeToLive);
    }

    /**
     * Core synchronous bulk read: checks the in-memory cache first, then the primary
     * replica, then zone fallbacks (full or partial). Hits are returned keyed by the
     * caller's original (de-canonicalized) key; misses map to null.
     */
    private <T> Map<String, T> getBulk(final Collection<String> keys, Transcoder<T> tc, boolean touch, int timeToLive)
            throws EVCacheException {
        if (null == keys) throw new IllegalArgumentException();
        if (keys.isEmpty()) return Collections.<String, T> emptyMap();
        checkTTL(timeToLive, Call.BULK);
        final boolean throwExc = doThrowException();
        final EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.BULK);
            if (throwExc) throw new EVCacheException("Could not find a client to get the data in bulk");
            return Collections.<String, T> emptyMap();// Fast failure
        }
        final Map<String, T> decanonicalR = new HashMap<String, T>((keys.size() * 4) / 3 + 1);
        final Collection<EVCacheKey> evcKeys = new ArrayList<EVCacheKey>();
        /* Canonicalize keys and perform fast failure checking */
        for (String k : keys) {
            final EVCacheKey evcKey = getEVCacheKey(k);
            T value = null;
            if (_useInMemoryCache.get()) {
                try {
                    final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ?
(Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                    value = (T) getInMemoryCache(transcoder).get(evcKey);
                    if(value == null)
                        if (log.isInfoEnabled() && shouldLog())
                            log.info("Value not_found in inmemory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value );
                } catch (ExecutionException e) {
                    if (log.isDebugEnabled() && shouldLog())
                        log.debug("ExecutionException while getting data from InMemory Cache", e);
                    throw new EVCacheException("ExecutionException", e);
                }
            }
            // In-memory misses go to the network batch; hits are already final.
            if(value == null) {
                evcKeys.add(evcKey);
            } else {
                decanonicalR.put(evcKey.getKey(), value);
                if (log.isDebugEnabled() && shouldLog())
                    log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey
                            + (log.isTraceEnabled() ? "; value : " + value : ""));
            }
        }
        // Everything served from memory: skip the network entirely.
        if(evcKeys.size() == 0 && decanonicalR.size() == keys.size()) {
            if (log.isDebugEnabled() && shouldLog())
                log.debug("All Values retrieved from inmemory cache for APP " + _appName + ", keys : " + keys
                        + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
            return decanonicalR;
        }
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.BULK);
        if (event != null) {
            event.setEVCacheKeys(evcKeys);
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.BULK);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & keys " + keys);
                    return Collections.<String, T> emptyMap();
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.BULK);
                return null;
            }
            event.setTTL(timeToLive);
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        String status = EVCacheMetricsFactory.SUCCESS;
        try {
            // With zone fallback available, the primary read must not throw so we can retry.
            final boolean hasZF = hasZoneFallbackForBulk();
            boolean throwEx = hasZF ? false : throwExc;
            Map<EVCacheKey, T> retMap = getBulkData(client, evcKeys, tc, throwEx, hasZF);
            List<EVCacheClient> fbClients = null;
            if (hasZF) {
                if (retMap == null || retMap.isEmpty()) {
                    // Full miss/failure: walk every other server group until one returns data.
                    fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                    if (fbClients != null && !fbClients.isEmpty()) {
                        for (int i = 0; i < fbClients.size(); i++) {
                            final EVCacheClient fbClient = fbClients.get(i);
                            // Only the last fallback is allowed to propagate exceptions.
                            if(i >= fbClients.size() - 1) throwEx = throwExc;
                            if (event != null) {
                                try {
                                    if (shouldThrottle(event)) {
                                        status = EVCacheMetricsFactory.THROTTLED;
                                        if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKeys);
                                        return null;
                                    }
                                } catch(EVCacheException ex) {
                                    if(throwExc) throw ex;
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    return null;
                                }
                            }
                            tries++;
                            retMap = getBulkData(fbClient, evcKeys, tc, throwEx, (i < fbClients.size() - 1) ? true : false);
                            if (log.isDebugEnabled() && shouldLog())
                                log.debug("Fallback for APP " + _appName + ", key [" + evcKeys
                                        + (log.isTraceEnabled() ? "], Value [" + retMap : "")
                                        + "], zone : " + fbClient.getZone());
                            if (retMap != null && !retMap.isEmpty()) break;
                        }
                        //increment("BULK-FULL_RETRY-" + ((retMap == null || retMap.isEmpty()) ? "MISS" : "HIT"));
                    }
                } else if (retMap != null && keys.size() > retMap.size() && _bulkPartialZoneFallbackFP.get()) {
                    // Partial miss: retry only the missing keys against the other server groups.
                    final int initRetrySize = keys.size() - retMap.size();
                    List<EVCacheKey> retryEVCacheKeys = new ArrayList<EVCacheKey>(initRetrySize);
                    for (Iterator<EVCacheKey> keysItr = evcKeys.iterator(); keysItr.hasNext();) {
                        final EVCacheKey key = keysItr.next();
                        if (!retMap.containsKey(key)) {
                            retryEVCacheKeys.add(key);
                        }
                    }
                    fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                    if (fbClients != null && !fbClients.isEmpty()) {
                        for (int ind = 0; ind < fbClients.size(); ind++) {
                            final EVCacheClient fbClient = fbClients.get(ind);
                            if (event != null) {
                                try {
                                    if (shouldThrottle(event)) {
                                        status = EVCacheMetricsFactory.THROTTLED;
                                        if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & keys " + retryEVCacheKeys);
                                        return null;
                                    }
                                } catch(EVCacheException ex) {
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    if(throwExc) throw ex;
                                    return null;
                                }
                            }
                            tries++;
                            final Map<EVCacheKey, T> fbRetMap = getBulkData(fbClient, retryEVCacheKeys, tc, false, hasZF);
                            if (log.isDebugEnabled() && shouldLog())
                                log.debug("Fallback for APP " + _appName + ", key [" + retryEVCacheKeys
                                        + "], Fallback Server Group : " + fbClient .getServerGroup().getName());
                            for (Map.Entry<EVCacheKey, T> i : fbRetMap.entrySet()) {
                                retMap.put(i.getKey(), i.getValue());
                                if (log.isDebugEnabled() && shouldLog())
                                    log.debug("Fallback for APP " + _appName + ", key [" + i.getKey()
                                            + (log.isTraceEnabled() ? "], Value [" + i.getValue(): "]"));
                            }
                            if (retryEVCacheKeys.size() == fbRetMap.size()) break;
                            // Recompute the still-missing keys for the next fallback client.
                            if (ind < fbClients.size()) {
                                retryEVCacheKeys = new ArrayList<EVCacheKey>(keys.size() - retMap.size());
                                for (Iterator<EVCacheKey> keysItr = evcKeys.iterator(); keysItr.hasNext();) {
                                    final EVCacheKey key = keysItr.next();
                                    if (!retMap.containsKey(key)) {
                                        retryEVCacheKeys.add(key);
                                    }
                                }
                            }
                        }
                    }
                    if (log.isDebugEnabled() && shouldLog() && retMap.size() == keys.size())
                        log.debug("Fallback SUCCESS for APP " + _appName + ", retMap [" + retMap + "]");
                }
            }
            if(decanonicalR.isEmpty()) {
                if (retMap == null || retMap.isEmpty()) {
                    if (log.isInfoEnabled() && shouldLog())
                        log.info("BULK : APP " + _appName + " ; Full cache miss for keys : " + keys);
                    if (event != null) event.setAttribute("status", "BMISS_ALL");
                    // Full miss: return a map of key -> null (only when the fetch itself succeeded).
                    final Map<String, T> returnMap = new HashMap<String, T>();
                    if (retMap != null && retMap.isEmpty()) {
                        for (String k : keys) {
                            returnMap.put(k, null);
                        }
                    }
                    //increment("BulkMissFull");
                    cacheOperation = EVCacheMetricsFactory.NO;
                    /* If both Retry and first request fail Exit Immediately. */
                    if (event != null) endEvent(event);
                    return returnMap;
                }
            }
            /* Decanonicalize the keys */
            boolean partialHit = false;
            final List<String> decanonicalHitKeys = new ArrayList<String>(retMap.size());
            for (Iterator<EVCacheKey> itr = evcKeys.iterator(); itr.hasNext();) {
                final EVCacheKey key = itr.next();
                final String deCanKey = key.getKey();
                final T value = retMap.get(key);
                if (value != null) {
                    decanonicalR.put(deCanKey, value);
                    if (touch) touchData(key, timeToLive);
                    decanonicalHitKeys.add(deCanKey);
                } else {
                    partialHit = true;
                    // this ensures the fallback was tried
                    decanonicalR.put(deCanKey, null);
                }
            }
            if (!decanonicalR.isEmpty()) {
                if (!partialHit) {
                    if (event != null) event.setAttribute("status", "BHIT");
                } else {
                    if (event != null) {
                        event.setAttribute("status", "BHIT_PARTIAL");
                        event.setAttribute("BHIT_PARTIAL_KEYS", decanonicalHitKeys);
                    }
                    //increment("BulkHitPartial");
                    cacheOperation = EVCacheMetricsFactory.PARTIAL;
                    if (log.isInfoEnabled() && shouldLog())
                        log.info("BULK_HIT_PARTIAL for APP " + _appName + ", keys in cache [" + decanonicalR
                                + "], all keys [" + keys + "]");
                }
            }
            if (log.isDebugEnabled() && shouldLog())
                log.debug("BulkGet; APP " + _appName + ", keys : " + keys
                        + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
            if (event != null) endEvent(event);
            return decanonicalR;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (log.isDebugEnabled() && shouldLog())
                log.debug("CheckedOperationTimeoutException getting bulk data for APP " + _appName + ", keys : " + evcKeys, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting bulk data for APP " + _appName
                    + ", keys = " + evcKeys
                    + ".\nYou can set the following property to increase the timeout " + _appName
                    + ".EVCacheClientPool.bulkReadTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog())
                log.debug("Exception getting bulk data for APP " + _appName + ", keys = " + evcKeys, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting bulk data for APP " + _appName + ", keys = " + evcKeys, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            // Lazily create the batch-size distribution summary (tag set is constant per instance).
            if(bulkKeysSize == null) {
                final List<Tag> tagList = new ArrayList<Tag>(4);
                tagList.addAll(tags);
                tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
                tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.READ));
                // if(status != null) tagList.add(new BasicTag(EVCacheMetricsFactory.STATUS, status));
                // if(tries >= 0) tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, String.valueOf(tries)));
                bulkKeysSize = EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.OVERALL_KEYS_SIZE, tagList);
            }
            bulkKeysSize.record(keys.size());
            getTimer(Call.BULK.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(),
client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS); if (log.isDebugEnabled() && shouldLog()) log.debug("BULK : APP " + _appName + " Took " + duration + " milliSec to get the value for key " + evcKeys); } } private <T> CompletableFuture<EVCacheBulkDataDto<T>> handleBulkInMemory(Collection<String> keys, Transcoder<T> tc) { if (log.isDebugEnabled() && shouldLog()) log.debug("handleBulkInMemory with keys {} " + keys); final Map<String, T> decanonicalR = new HashMap<>((keys.size() * 4) / 3 + 1); final List<EVCacheKey> evcKeys = new ArrayList<>(); CompletableFuture<EVCacheBulkDataDto<T>> promise = new CompletableFuture<>(); try { EVCacheBulkDataDto<T> data = handleBulkInMemory(keys, tc, decanonicalR, evcKeys); promise.complete(data); } catch (Exception e) { promise.completeExceptionally(e); } return promise; } private <T> EVCacheBulkDataDto<T> handleBulkInMemory(Collection<String> keys, Transcoder<T> tc, Map<String, T> decanonicalR, List<EVCacheKey> evcKeys) throws Exception { for (String k : keys) { final EVCacheKey evcKey = getEVCacheKey(k); T value = getInMemory(evcKey, tc); if (value != null) { decanonicalR.put(evcKey.getKey(), value); if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : "")); } else { if (log.isDebugEnabled() && shouldLog()) log.debug("Key not present in in memory {} " + k); evcKeys.add(evcKey); } } return new EVCacheBulkDataDto<>(decanonicalR, evcKeys); } public <T> CompletableFuture<Map<String, T>> getAsyncBulk(String... 
keys) {
        return this.getAsyncBulk(Arrays.asList(keys), (Transcoder<T>) _transcoder);
    }

    /**
     * Async bulk read: serves what it can from the in-memory cache, then fetches the
     * remainder from the servers via {@link #doAsyncGetBulk}.
     */
    public <T> CompletableFuture<Map<String, T>> getAsyncBulk(final Collection<String> keys, Transcoder<T> tc) {
        if (null == keys) throw new IllegalArgumentException();
        if (keys.isEmpty()) return CompletableFuture.completedFuture(Collections.emptyMap());
        return handleBulkInMemory(keys, tc)
                .thenCompose(dto -> doAsyncGetBulk(keys, tc, dto));
    }

    /**
     * Network portion of the async bulk read. Builds the client and event (each of which
     * may complete {@code errorFuture} exceptionally for fast failure), performs the read
     * with zone-fallback retries, then merges results with the in-memory hits in the DTO.
     */
    private <T> CompletableFuture<Map<String, T>> doAsyncGetBulk(Collection<String> keys, Transcoder<T> tc,
            EVCacheBulkDataDto<T> dto) {
        // all keys handled by in memory
        if(dto.getEvcKeys().size() == 0 && dto.getDecanonicalR().size() == keys.size()) {
            if (log.isDebugEnabled() && shouldLog())
                log.debug("All Values retrieved from in-memory cache for APP " + _appName + ", keys : " + keys);
            return CompletableFuture.completedFuture(dto.getDecanonicalR());
        }
        final boolean throwExc = doThrowException();
        CompletableFuture<Map<String, T>> errorFuture = new CompletableFuture<>();
        EVCacheClient client = buildEvCacheClient(throwExc, Call.COMPLETABLE_FUTURE_GET_BULK, errorFuture);
        if (errorFuture.isCompletedExceptionally() || client == null) {
            if (client == null ) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("doAsyncGetBulk is null");
                errorFuture.complete(null);
            }
            return errorFuture;
        }
        if (log.isDebugEnabled() && shouldLog()) log.debug("Completed Building the client for doAsyncGetBulk");
        //Building the start event
        EVCacheEvent event = buildAndStartEvent(client, dto.getEvcKeys(), throwExc, errorFuture, Call.COMPLETABLE_FUTURE_GET_BULK);
        if (errorFuture.isCompletedExceptionally()) {
            if (log.isDebugEnabled() && shouldLog())
                log.debug("Error while building and starting the event for doAsyncGetBulk");
            return errorFuture;
        }
        if (log.isDebugEnabled() && shouldLog()) log.debug("Cancelling the error future");
        // errorFuture is no longer a possible result path past this point.
        errorFuture.cancel(false);
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        // StringBuilders are used so downstream lambdas can mutate the status/operation tags.
        StringBuilder status = new StringBuilder(EVCacheMetricsFactory.SUCCESS);
        StringBuilder cacheOperation = new StringBuilder(EVCacheMetricsFactory.YES);
        final boolean hasZF = hasZoneFallbackForBulk();
        RetryCount retryCount = new RetryCount();
        boolean throwEx = !hasZF && throwExc;
        return getAsyncBulkData(client, dto.getEvcKeys(), tc)
                .thenCompose(data -> handleBulkRetry(data, dto.getEvcKeys(), tc, client, event, hasZF, retryCount))
                .handle((data, ex) -> {
                    if (ex != null) {
                        handleFullCacheMiss(data, event, keys, cacheOperation);
                        handleException(ex, event);
                        if (throwEx) {
                            throw new RuntimeException(ex);
                        } else {
                            return null;
                        }
                    } else {
                        Map<String, T> result = handleBulkData(dto.getDecanonicalR(), data, event, keys, dto.getEvcKeys(), cacheOperation);
                        handleBulkFinally(status, retryCount, client, cacheOperation, keys, start);
                        return result;
                    }
                });
    }

    /**
     * Merges server results into the in-memory hit map, marks misses as null entries,
     * updates the event attributes, and returns the final caller-facing map.
     */
    private <T> Map<String, T> handleBulkData(Map<String, T> decanonicalR, Map<EVCacheKey, T> retMap,
            EVCacheEvent event, Collection<String> keys, List<EVCacheKey> evcKeys, StringBuilder cacheOperation) {
        if(retMap == null || retMap.isEmpty()) {
            return handleFullCacheMiss(retMap, event, keys, cacheOperation);
        }
        boolean partialHit = false;
        final List<String> decanonicalHitKeys = new ArrayList<>(retMap.size());
        for (final EVCacheKey key : evcKeys) {
            final String deCanKey = key.getKey();
            final T value = retMap.get(key);
            if (value != null) {
                decanonicalR.put(deCanKey, value);
                decanonicalHitKeys.add(deCanKey);
            } else {
                partialHit = true;
                // this ensures the fallback was tried
                decanonicalR.put(deCanKey, null);
            }
        }
        if (!decanonicalR.isEmpty()) {
            updateBulkGetEvent(decanonicalR, event, keys, partialHit, decanonicalHitKeys, cacheOperation);
        }
        if (log.isDebugEnabled() && shouldLog())
            log.debug("Async BulkGet; APP " + _appName + ", keys : " + keys + (log.isTraceEnabled() ?
"; value : " + decanonicalR : "")); if (event != null) endEvent(event); return decanonicalR; } private <T> void updateBulkGetEvent(Map<String, T> decanonicalR, EVCacheEvent event, Collection<String> keys, boolean partialHit, List<String> decanonicalHitKeys, StringBuilder cacheOperation) { if (!partialHit) { if (event != null) event.setAttribute("status", "ASYNC_BHIT"); } else { if (event != null) { event.setAttribute("status", "ASYNC_BHIT_PARTIAL"); event.setAttribute("ASYNC_BHIT_PARTIAL_KEYS", decanonicalHitKeys); } cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.PARTIAL); if (log.isInfoEnabled() && shouldLog()) log.info("ASYNC_BULK_HIT_PARTIAL for APP " + _appName + ", keys in cache [" + decanonicalR + "], all keys [" + keys + "]"); } } private <T> Map<String, T> handleFullCacheMiss(Map<EVCacheKey, T> retMap, EVCacheEvent event, Collection<String> keys, StringBuilder cacheOperation) { if (log.isInfoEnabled() && shouldLog()) log.info("ASYNC BULK : APP " + _appName + " ; Full cache miss for keys : " + keys); if (event != null) event.setAttribute("status", "ASYNC_BMISS_ALL"); final Map<String, T> returnMap = new HashMap<>(); if (retMap != null && retMap.isEmpty()) { for (String k : keys) { returnMap.put(k, null); } } cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.NO); if (event != null) endEvent(event); return returnMap; } private <T> CompletableFuture<Map<EVCacheKey, T>> handleFullRetry(EVCacheClient client, EVCacheEvent event, List<EVCacheKey> evcKeys, Transcoder<T> tc, RetryCount retryCount) { final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup()); if (log.isInfoEnabled() && shouldLog()) { log.info("Fetching the clients for retry {}", fbClients); } return handleFullBulkRetries(fbClients, 0, event, evcKeys, tc, retryCount); } private <T> CompletableFuture<Map<EVCacheKey, T>> handleFullBulkRetries(List<EVCacheClient> fbClients, int fbClientIndex, EVCacheEvent event, 
List<EVCacheKey> evcKeys, Transcoder<T> tc, RetryCount retryCount) { if (fbClientIndex >= fbClients.size()) { if (log.isInfoEnabled() && shouldLog()) { log.debug("Clients exhausted so returning the future with null result for keys {}", evcKeys); } return CompletableFuture.completedFuture(null); } if (log.isInfoEnabled() && shouldLog()) { EVCacheClient evCacheClient = fbClients.get(fbClientIndex); log.debug("Trying to fetching the data from server group {} client {} and keys {}", evCacheClient.getServerGroupName(), evCacheClient.getId(), evcKeys); } CompletableFuture<Map<EVCacheKey, T>> future = getAsyncBulkData(fbClients.get(fbClientIndex), event, evcKeys, tc); int nextIndex = fbClientIndex + 1; retryCount.incr(); return future .thenApply(s -> s != null ? CompletableFuture.completedFuture(s) : handleFullBulkRetries(fbClients, nextIndex, event, evcKeys, tc, retryCount)) .exceptionally(t -> handleFullBulkRetries(fbClients, nextIndex, event, evcKeys, tc, retryCount)) .thenCompose(Function.identity()); } private <T> CompletableFuture<Map<EVCacheKey, T>> handleBulkRetry(Map<EVCacheKey, T> retMap, List<EVCacheKey> evcKeys, Transcoder<T> tc, EVCacheClient client, EVCacheEvent event, boolean hasZF, RetryCount retryCount) { if (log.isInfoEnabled() && shouldLog()) { log.debug("handling Bulk retry with keys {}", evcKeys); } if (hasZF && (retMap == null || retMap.isEmpty())) { if (log.isInfoEnabled() && shouldLog()) { log.debug("Return map is null or empty for going for a full retry {} ", evcKeys); } return handleFullRetry(client, event, evcKeys, tc, retryCount); } if (log.isInfoEnabled() && shouldLog()) { log.debug("Async does not yet support partial retry for bulk. So completing the future or keys {}", evcKeys); } return CompletableFuture.completedFuture(retMap); } public <T> Map<String, T> getBulk(Collection<String> keys) throws EVCacheException { return (this.getBulk(keys, (Transcoder<T>) _transcoder)); } public <T> Map<String, T> getBulk(String... 
keys) throws EVCacheException {
        return (this.getBulk(Arrays.asList(keys), (Transcoder<T>) _transcoder));
    }

    public <T> Map<String, T> getBulk(Transcoder<T> tc, String... keys) throws EVCacheException {
        return (this.getBulk(Arrays.asList(keys), tc));
    }

    /**
     * Set that exposes the per-replica futures directly: runs the latch-based set and
     * unwraps its futures into {@link EVCacheFuture}s (attaching the server group when known).
     */
    @Override
    public <T> EVCacheFuture[] set(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
        final EVCacheLatch latch = this.set(key, value, tc, timeToLive, null);
        if (latch == null) return new EVCacheFuture[0];
        final List<Future<Boolean>> futures = latch.getAllFutures();
        if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
        final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
        for (int i = 0; i < futures.size(); i++) {
            final Future<Boolean> future = futures.get(i);
            if (future instanceof EVCacheFuture) {
                eFutures[i] = (EVCacheFuture) future;
            } else if (future instanceof EVCacheOperationFuture) {
                eFutures[i] = new EVCacheFuture(futures.get(i), key, _appName, ((EVCacheOperationFuture<T>) futures.get(i)).getServerGroup());
            } else {
                eFutures[i] = new EVCacheFuture(future, key, _appName, null);
            }
        }
        return eFutures;
    }

    public <T> EVCacheLatch set(String key, T value, Policy policy) throws EVCacheException {
        return set(key, value, (Transcoder<T>)_transcoder, _timeToLive, policy);
    }

    public <T> EVCacheLatch set(String key, T value, int timeToLive, Policy policy) throws EVCacheException {
        return set(key, value, (Transcoder<T>)_transcoder, timeToLive, policy);
    }

    public <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException {
        return set(key, value, tc, _timeToLive, policy);
    }

    public <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException {
        EVCacheClient clients[] = _pool.getEVCacheClientForWrite();
        // Latch counts only read-write replicas; write-only replicas don't gate completion.
        return this.set(key, value, tc, timeToLive, policy, clients, clients.length - _pool.getWriteOnlyEVCacheClients().length);
    }

    /**
     * Core set: encodes the value once, writes it to every client (wrapping it in an
     * {@link EVCacheValue} when the client hashes keys), and returns a latch over the writes.
     * Events are either closed immediately or attached to the latch for later validation.
     */
    protected <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy,
            EVCacheClient[] clients, int latchCount) throws EVCacheException {
        if ((null == key) || (null == value)) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.SET);
        final boolean throwExc = doThrowException();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.SET);
            if (throwExc) throw new EVCacheException("Could not find a client to set the data");
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.SET);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.SET);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName);
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.SET);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, latchCount, _appName);
        try {
            // Encode the payload lazily and at most once (plain and hashed variants cached).
            CachedData cd = null;
            CachedData cdHashed = null;
            for (EVCacheClient client : clients) {
                final String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
                final String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(),
                        client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(),
                        client.getBaseEncoder());
                if(cd == null) {
                    if (tc != null) {
                        cd = tc.encode(value);
                    } else if (_transcoder != null) {
                        cd = ((Transcoder<Object>) _transcoder).encode(value);
                    } else {
                        cd = client.getTranscoder().encode(value);
                    }
                }
                if (hashKey != null) {
                    if(cdHashed == null) {
                        // Wrap the payload so the canonical key travels with it for collision checks on read.
                        final EVCacheValue val = new EVCacheValue(canonicalKey, cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis());
                        cdHashed = evcacheValueTranscoder.encode(val);
                    }
                    final Future<Boolean> future = client.set(hashKey, cdHashed, timeToLive, latch);
                    if (log.isDebugEnabled() && shouldLog())
                        log.debug("SET : APP " + _appName + ", Future " + future + " for hashed key : " + evcKey);
                } else {
                    final Future<Boolean> future = client.set(canonicalKey, cd, timeToLive, latch);
                    if (log.isDebugEnabled() && shouldLog())
                        log.debug("SET : APP " + _appName + ", Future " + future + " for key : " + evcKey);
                }
            }
            if (event != null) {
                event.setTTL(timeToLive);
                event.setCachedData(cd);
                if(_eventsUsingLatchFP.get()) {
                    latch.setEVCacheEvent(event);
                    latch.scheduledFutureValidation();
                } else {
                    endEvent(event);
                }
            }
            return latch;
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog())
                log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) endEvent(event);
            status = EVCacheMetricsFactory.ERROR;
            if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
            throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTTLDistributionSummary(Call.SET.name(), EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.TTL).record(timeToLive);
            getTimer(Call.SET.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog())
                log.debug("SET : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
        }
    }

    public <T> EVCacheFuture[] append(String key, T value, int timeToLive) throws EVCacheException {
        return this.append(key, value, null, timeToLive);
    }

    /**
     * Appends to an existing value on every writable client and refreshes its TTL.
     * Not supported for clients with key hashing enabled (the wrapped-value format
     * cannot be appended to).
     */
    public <T> EVCacheFuture[] append(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
        if ((null == key) || (null == value)) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.APPEND);
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.APPEND);
            if (throwExc) throw new EVCacheException("Could not find a client to set the data");
            return new EVCacheFuture[0]; // Fast failure
        }
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.APPEND);
        final EVCacheKey evcKey = getEVCacheKey(key);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheFuture[0];
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        try {
            final EVCacheFuture[] futures = new EVCacheFuture[clients.length];
            CachedData cd = null;
            int index = 0;
            for (EVCacheClient client : clients) {
                // ensure key hashing is not enabled
                if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(),
                        client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
                    throw new IllegalArgumentException("append is not supported when key hashing is enabled.");
                }
                if (cd == null) {
                    if (tc != null) {
                        cd = tc.encode(value);
                    } else if ( _transcoder != null) {
                        cd = ((Transcoder<Object>)_transcoder).encode(value);
                    } else {
                        cd = client.getTranscoder().encode(value);
                    }
                    //if (cd != null) EVCacheMetricsFactory.getInstance().getDistributionSummary(_appName + "-AppendData-Size", tags).record(cd.getData().length);
                }
                final Future<Boolean> future = client.append(evcKey.getDerivedKey(client.isDuetClient(),
                        client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(),
                        client.getMaxHashLength(), client.getBaseEncoder()), cd);
                futures[index++] = new EVCacheFuture(future, key, _appName, client.getServerGroup());
            }
            if (event != null) {
                event.setCachedData(cd);
                event.setTTL(timeToLive);
                endEvent(event);
            }
            // Refresh the TTL so the appended value doesn't expire on its original schedule.
            touchData(evcKey, timeToLive, clients);
            return futures;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog())
                log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return new EVCacheFuture[0];
            throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            //timer.record(duration, TimeUnit.MILLISECONDS);
            getTimer(Call.APPEND.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog())
                log.debug("APPEND : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
        }
    }

    public <T> EVCacheFuture[] set(String key, T value, Transcoder<T> tc) throws EVCacheException {
        return this.set(key, value, tc, _timeToLive);
    }

    public <T> EVCacheFuture[] set(String key, T value, int timeToLive) throws EVCacheException {
        return this.set(key, value, (Transcoder<T>) _transcoder, timeToLive);
    }

    public <T> EVCacheFuture[] set(String key, T value) throws EVCacheException {
        return this.set(key, value, (Transcoder<T>) _transcoder, _timeToLive);
    }

    public EVCacheFuture[] delete(String key) throws EVCacheException {
        return this.deleteInternal(key, false);
    }

    /** Future-array variant of delete; unwraps the latch produced by the latch-based overload. */
    protected EVCacheFuture[] deleteInternal(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        final EVCacheLatch latch = this.deleteInternal(key, null, isOriginalKeyHashed);
        if (latch == null) return new EVCacheFuture[0];
        final List<Future<Boolean>> futures = latch.getAllFutures();
        if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
        final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
        for (int i = 0; i < futures.size(); i++) {
            final Future<Boolean> future = futures.get(i);
            if (future instanceof EVCacheFuture) {
                eFutures[i] = (EVCacheFuture) future;
            } else if (future instanceof EVCacheOperationFuture) {
                final EVCacheOperationFuture<Boolean> evfuture = (EVCacheOperationFuture<Boolean>)future;
                eFutures[i] = new EVCacheFuture(future, key, _appName, evfuture.getServerGroup(), evfuture.getEVCacheClient());
            } else {
                eFutures[i] = new EVCacheFuture(future, key, _appName, null);
            }
        }
        return eFutures;
    }

    @Override
    public <T> EVCacheLatch delete(String key, Policy policy) throws EVCacheException {
        return this.deleteInternal(key, policy, false);
    }

    /**
     * Latch-based delete across all writable clients.
     * {@code isOriginalKeyHashed} indicates the caller already supplied the hashed form of the key.
     */
    protected <T> EVCacheLatch deleteInternal(String key, Policy policy, boolean isOriginalKeyHashed) throws EVCacheException {
        if (key == null) throw new IllegalArgumentException("Key cannot be null");
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.DELETE);
            if (throwExc) throw new EVCacheException("Could not find a client to delete the keyAPP " + _appName + ", Key " + key);
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.DELETE);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DELETE);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DELETE);
                return null;
            }
            startEvent(event);
        }
        String status = EVCacheMetricsFactory.SUCCESS;
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length
                - _pool.getWriteOnlyEVCacheClients().length, _appName);
        try {
            for (int i = 0; i < clients.length; i++) {
                Future<Boolean> future = clients[i].delete(isOriginalKeyHashed ?
evcKey.getKey() : evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), latch); if (log.isDebugEnabled() && shouldLog()) log.debug("DELETE : APP " + _appName + ", Future " + future + " for key : " + evcKey); } if (event != null) { if(_eventsUsingLatchFP.get()) { latch.setEVCacheEvent(event); latch.scheduledFutureValidation(); } else { endEvent(event); } } return latch; } catch (Exception ex) { if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while deleting the data for APP " + _appName + ", key : " + key, ex); status = EVCacheMetricsFactory.ERROR; if (event != null) { event.setStatus(status); eventError(event, ex); } if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName); throw new EVCacheException("Exception while deleting the data for APP " + _appName + ", key : " + key, ex); } finally { final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start; getTimer(Call.DELETE.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS); //timer.record(duration, TimeUnit.MILLISECONDS); if (log.isDebugEnabled() && shouldLog()) log.debug("DELETE : APP " + _appName + " Took " + duration + " milliSec for key : " + key); } } public int getDefaultTTL() { return _timeToLive; } public long incr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException { if ((null == key) || by < 0 || defaultVal < 0 || timeToLive < 0) throw new IllegalArgumentException(); checkTTL(timeToLive, Call.INCR); final boolean throwExc = doThrowException(); final EVCacheClient[] clients = _pool.getEVCacheClientForWrite(); if (clients.length == 0) { incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.INCR); if (log.isDebugEnabled() && shouldLog()) log.debug("INCR : " + _metricPrefix + 
":NULL_CLIENT"); if (throwExc) throw new EVCacheException("Could not find a client to incr the data"); return -1; } final EVCacheKey evcKey = getEVCacheKey(key); final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.INCR); if (event != null) { event.setTTL(timeToLive); event.setEVCacheKeys(Arrays.asList(evcKey)); try { if (shouldThrottle(event)) { incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.INCR); if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key); return -1; } } catch(EVCacheException ex) { if(throwExc) throw ex; incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.INCR); return -1; } startEvent(event); } String status = EVCacheMetricsFactory.SUCCESS; final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime(); long currentValue = -1; try { final long[] vals = new long[clients.length]; int index = 0; for (EVCacheClient client : clients) { vals[index] = client.incr(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), by, defaultVal, timeToLive); if (vals[index] != -1 && currentValue < vals[index]) { currentValue = vals[index]; if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " current value = " + currentValue + " for key : " + key + " from client : " + client); } index++; } if (currentValue != -1) { CachedData cd = null; if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " current value = " + currentValue + " for key : " + key); for (int i = 0; i < vals.length; i++) { if (vals[i] == -1 && currentValue > -1) { if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + "; Zone " + clients[i].getZone() + " had a value = -1 so setting it to current value = " + currentValue + " for key : " + key); clients[i].incr(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), 
clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), 0, currentValue, timeToLive); } else if (vals[i] != currentValue) { if(cd == null) cd = clients[i].getTranscoder().encode(String.valueOf(currentValue)); if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + "; Zone " + clients[i].getZone() + " had a value of " + vals[i] + " so setting it to current value = " + currentValue + " for key : " + key); clients[i].set(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), cd, timeToLive); } } } if (event != null) endEvent(event); if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " returning value = " + currentValue + " for key : " + key); return currentValue; } catch (Exception ex) { status = EVCacheMetricsFactory.ERROR; if (log.isDebugEnabled() && shouldLog()) log.debug("Exception incrementing the value for APP " + _appName + ", key : " + key, ex); if (event != null) { event.setStatus(status); eventError(event, ex); } if (!throwExc) return -1; throw new EVCacheException("Exception incrementing value for APP " + _appName + ", key : " + key, ex); } finally { final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start; getTimer(Call.INCR.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS); if (log.isDebugEnabled() && shouldLog()) log.debug("INCR : APP " + _appName + ", Took " + duration + " milliSec for key : " + key + " with value as " + currentValue); } } public long decr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException { if ((null == key) || by < 0 || defaultVal < 0 || timeToLive < 0) throw new IllegalArgumentException(); checkTTL(timeToLive, Call.DECR); final boolean throwExc 
= doThrowException(); final EVCacheClient[] clients = _pool.getEVCacheClientForWrite(); if (clients.length == 0) { incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.DECR); if (log.isDebugEnabled() && shouldLog()) log.debug("DECR : " + _metricPrefix + ":NULL_CLIENT"); if (throwExc) throw new EVCacheException("Could not find a client to decr the data"); return -1; } final EVCacheKey evcKey = getEVCacheKey(key); final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.DECR); if (event != null) { event.setTTL(timeToLive); event.setEVCacheKeys(Arrays.asList(evcKey)); try { if (shouldThrottle(event)) { incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DECR); if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key); return -1; } } catch(EVCacheException ex) { if(throwExc) throw ex; incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DECR); return -1; } startEvent(event); } final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime(); String status = EVCacheMetricsFactory.SUCCESS; long currentValue = -1; try { final long[] vals = new long[clients.length]; int index = 0; for (EVCacheClient client : clients) { vals[index] = client.decr(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), by, defaultVal, timeToLive); if (vals[index] != -1 && currentValue < vals[index]) { currentValue = vals[index]; if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " current value = " + currentValue + " for key : " + key + " from client : " + client); } index++; } if (currentValue != -1) { CachedData cd = null; if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " current value = " + currentValue + " for key : " + key); for (int i = 0; i < vals.length; i++) { if (vals[i] == -1 && currentValue > -1) { if (log.isDebugEnabled()) 
log.debug("DECR : APP " + _appName + "; Zone " + clients[i].getZone() + " had a value = -1 so setting it to current value = " + currentValue + " for key : " + key); clients[i].decr(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), 0, currentValue, timeToLive); } else if (vals[i] != currentValue) { if(cd == null) cd = clients[i].getTranscoder().encode(currentValue); if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + "; Zone " + clients[i].getZone() + " had a value of " + vals[i] + " so setting it to current value = " + currentValue + " for key : " + key); clients[i].set(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), cd, timeToLive); } } } if (event != null) endEvent(event); if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " returning value = " + currentValue + " for key : " + key); return currentValue; } catch (Exception ex) { status = EVCacheMetricsFactory.ERROR; if (log.isDebugEnabled() && shouldLog()) log.debug("Exception decrementing the value for APP " + _appName + ", key : " + key, ex); if (event != null) { event.setStatus(status); eventError(event, ex); } if (!throwExc) return -1; throw new EVCacheException("Exception decrementing value for APP " + _appName + ", key : " + key, ex); } finally { final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start; getTimer(Call.DECR.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS); if (log.isDebugEnabled() && shouldLog()) log.debug("DECR : APP " + _appName + ", Took " + duration + " milliSec for key : " + key + " with value as " + currentValue); } } 
@Override public <T> EVCacheLatch replace(String key, T value, Policy policy) throws EVCacheException { return replace(key, value, (Transcoder<T>) _transcoder, policy); } @Override public <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, Policy policy) throws EVCacheException { return replace(key, value, (Transcoder<T>) _transcoder, _timeToLive, policy); } public <T> EVCacheLatch replace(String key, T value, int timeToLive, Policy policy) throws EVCacheException { return replace(key, value, (Transcoder<T>)_transcoder, timeToLive, policy); } @Override public <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException { if ((null == key) || (null == value)) throw new IllegalArgumentException(); checkTTL(timeToLive, Call.REPLACE); final boolean throwExc = doThrowException(); final EVCacheClient[] clients = _pool.getEVCacheClientForWrite(); if (clients.length == 0) { incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.REPLACE); if (throwExc) throw new EVCacheException("Could not find a client to set the data"); return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure } final EVCacheKey evcKey = getEVCacheKey(key); final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.REPLACE); if (event != null) { event.setEVCacheKeys(Arrays.asList(evcKey)); try { if (shouldThrottle(event)) { incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.REPLACE); if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key); return new EVCacheLatchImpl(policy, 0, _appName); } } catch(EVCacheException ex) { if(throwExc) throw ex; incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.REPLACE); return null; } startEvent(event); } final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime(); String status = EVCacheMetricsFactory.SUCCESS; final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? 
Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName); try { final EVCacheFuture[] futures = new EVCacheFuture[clients.length]; CachedData cd = null; int index = 0; for (EVCacheClient client : clients) { if (tc != null) { cd = tc.encode(value); } else if (_transcoder != null) { cd = ((Transcoder<Object>) _transcoder).encode(value); } else { cd = client.getTranscoder().encode(value); } if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) { final EVCacheValue val = new EVCacheValue(evcKey.getCanonicalKey(client.isDuetClient()), cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis()); cd = evcacheValueTranscoder.encode(val); } final Future<Boolean> future = client.replace(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd, timeToLive, latch); futures[index++] = new EVCacheFuture(future, key, _appName, client.getServerGroup()); } if (event != null) { event.setTTL(timeToLive); event.setCachedData(cd); if(_eventsUsingLatchFP.get()) { latch.setEVCacheEvent(event); latch.scheduledFutureValidation(); } else { endEvent(event); } } return latch; } catch (Exception ex) { status = EVCacheMetricsFactory.ERROR; if (log.isDebugEnabled() && shouldLog()) log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex); if (event != null) { event.setStatus(status); eventError(event, ex); } if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName); throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex); } finally { final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start; getTimer(Call.REPLACE.name(), EVCacheMetricsFactory.WRITE, null, status, 
1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS); if (log.isDebugEnabled() && shouldLog()) log.debug("REPLACE : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey); } } @Override public String getCachePrefix() { return _cacheName; } public String getAppName() { return _appName; } public String getCacheName() { return _cacheName; } public <T> EVCacheLatch appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException { if ((null == key) || (null == value)) throw new IllegalArgumentException(); checkTTL(timeToLive, Call.APPEND_OR_ADD); final boolean throwExc = doThrowException(); final EVCacheClient[] clients = _pool.getEVCacheClientForWrite(); if (clients.length == 0) { incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.APPEND_OR_ADD); if (throwExc) throw new EVCacheException("Could not find a client to appendOrAdd the data"); return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure } final EVCacheKey evcKey = getEVCacheKey(key); final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.APPEND_OR_ADD); if (event != null) { event.setEVCacheKeys(Arrays.asList(evcKey)); try { if (shouldThrottle(event)) { incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND_OR_ADD); if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key); return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure } } catch(EVCacheException ex) { if(throwExc) throw ex; incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND_OR_ADD); return null; } startEvent(event); } final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime(); final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? 
Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName); String status = EVCacheMetricsFactory.SUCCESS; try { CachedData cd = null; for (EVCacheClient client : clients) { // ensure key hashing is not enabled if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) { throw new IllegalArgumentException("appendOrAdd is not supported when key hashing is enabled."); } if (cd == null) { if (tc != null) { cd = tc.encode(value); } else if ( _transcoder != null) { cd = ((Transcoder<Object>)_transcoder).encode(value); } else { cd = client.getTranscoder().encode(value); } } final Future<Boolean> future = client.appendOrAdd(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd, timeToLive, latch); if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND_OR_ADD : APP " + _appName + ", Future " + future + " for key : " + evcKey); } if (event != null) { event.setTTL(timeToLive); event.setCachedData(cd); if(_eventsUsingLatchFP.get()) { latch.setEVCacheEvent(event); latch.scheduledFutureValidation(); } else { endEvent(event); } } return latch; } catch (Exception ex) { status = EVCacheMetricsFactory.ERROR; if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while appendOrAdd the data for APP " + _appName + ", key : " + evcKey, ex); if (event != null) { event.setStatus(status); eventError(event, ex); } if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName); throw new EVCacheException("Exception while appendOrAdd data for APP " + _appName + ", key : " + evcKey, ex); } finally { final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start; getTimer(Call.APPEND_OR_ADD.name(), EVCacheMetricsFactory.WRITE, null, status, 1, 
maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS); if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND_OR_ADD : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey); } } public <T> Future<Boolean>[] appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException { final EVCacheLatch latch = this.appendOrAdd(key, value, tc, timeToLive, Policy.ALL_MINUS_1); if(latch != null) return latch.getAllFutures().toArray(new Future[latch.getAllFutures().size()]); return new EVCacheFuture[0]; } public <T> boolean add(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException { final EVCacheLatch latch = add(key, value, tc, timeToLive, Policy.NONE); try { latch.await(_pool.getOperationTimeout().get(), TimeUnit.MILLISECONDS); final List<Future<Boolean>> allFutures = latch.getAllFutures(); for(Future<Boolean> future : allFutures) { if(!future.get()) return false; } return true; } catch (InterruptedException e) { if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + key, e); final boolean throwExc = doThrowException(); if(throwExc) throw new EVCacheException("Exception add data for APP " + _appName + ", key : " + key, e); return false; } catch (ExecutionException e) { if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + key, e); final boolean throwExc = doThrowException(); if(throwExc) throw new EVCacheException("Exception add data for APP " + _appName + ", key : " + key, e); return false; } } @Override public <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException { EVCacheClient[] clients = _pool.getEVCacheClientForWrite(); EVCacheClient[] writeOnlyClients = _pool.getWriteOnlyEVCacheClients(); // In case of adds , we skip adds to the pool if value is already present in the 1st client // Sorting to 
make sure the 1st element of the list is a read/write client and not just write-only client EVCacheClient[] sortedClients = sortClients(clients, writeOnlyClients); return this.add(key, value, tc, timeToLive, policy, sortedClients, clients.length - _pool.getWriteOnlyEVCacheClients().length); } public EVCacheClient[] sortClients(EVCacheClient[] clients, EVCacheClient[] writeOnlyClients) { List<EVCacheClient> writeOnlyClientsList = Arrays.asList(writeOnlyClients); List<EVCacheClient> clientList = Arrays.stream(clients).sorted((s1, s2) -> { if (writeOnlyClientsList.contains(s1)) return 1; return -1; }).collect(Collectors.toList()); return clientList.stream().toArray(EVCacheClient[]::new); } protected <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount) throws EVCacheException { return add(key, value, tc, timeToLive, policy, clients, latchCount, true); } protected <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount, boolean fixup) throws EVCacheException { if ((null == key) || (null == value)) throw new IllegalArgumentException(); checkTTL(timeToLive, Call.ADD); final boolean throwExc = doThrowException(); if (clients.length == 0) { incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.ADD); if (throwExc) throw new EVCacheException("Could not find a client to Add the data"); return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure } final EVCacheKey evcKey = getEVCacheKey(key); final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.ADD); if (event != null) { event.setEVCacheKeys(Arrays.asList(evcKey)); try { if (shouldThrottle(event)) { incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ADD); if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key); return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure } } 
catch(EVCacheException ex) { if(throwExc) throw ex; incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ADD); return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure } startEvent(event); } final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime(); String status = EVCacheMetricsFactory.SUCCESS; EVCacheLatch latch = null; try { CachedData cd = null; if (tc != null) { cd = tc.encode(value); } else if (_transcoder != null) { cd = ((Transcoder<Object>) _transcoder).encode(value); } else { cd = _pool.getEVCacheClientForRead().getTranscoder().encode(value); } if (clientUtil == null) clientUtil = new EVCacheClientUtil(_appName, _pool.getOperationTimeout().get()); latch = clientUtil.add(evcKey, cd, evcacheValueTranscoder, timeToLive, policy, clients, latchCount, fixup); if (event != null) { event.setTTL(timeToLive); event.setCachedData(cd); if (_eventsUsingLatchFP.get()) { latch.setEVCacheEvent(event); if (latch instanceof EVCacheLatchImpl) ((EVCacheLatchImpl) latch).scheduledFutureValidation(); } else { endEvent(event); } } return latch; } catch (Exception ex) { status = EVCacheMetricsFactory.ERROR; if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + evcKey, ex); if (event != null) { event.setStatus(status); eventError(event, ex); } if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName); throw new EVCacheException("Exception adding data for APP " + _appName + ", key : " + evcKey, ex); } finally { final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start; getTimer(Call.ADD.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS); if (log.isDebugEnabled() && shouldLog()) log.debug("ADD : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey); } } private DistributionSummary getTTLDistributionSummary(String operation, String 
type, String metric) { DistributionSummary distributionSummary = distributionSummaryMap.get(operation); if(distributionSummary != null) return distributionSummary; final List<Tag> tagList = new ArrayList<Tag>(6); tagList.addAll(tags); tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation)); tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, type)); distributionSummary = EVCacheMetricsFactory.getInstance().getDistributionSummary(metric, tagList); distributionSummaryMap.put(operation, distributionSummary); return distributionSummary; } private Timer getTimer(String operation, String operationType, String hit, String status, int tries, long duration, ServerGroup serverGroup) { String name = ((hit != null) ? operation + hit : operation); if(status != null) name += status; if(tries >= 0) name += tries; if(serverGroup != null) name += serverGroup.getName(); //if(_cacheName != null) name += _cacheName; Timer timer = timerMap.get(name); if(timer != null) return timer; final List<Tag> tagList = new ArrayList<Tag>(7); tagList.addAll(tags); if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation)); if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType)); if(status != null) tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_RESULT, status)); if(hit != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE_HIT, hit)); switch(tries) { case 0 : case 1 : tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.INITIAL)); break; case 2 : tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.SECOND)); break; default: tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.THIRD_UP)); break; } // if(tries == 0) tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, String.valueOf(tries))); if(serverGroup != null) { tagList.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName())); 
tagList.add(new BasicTag(EVCacheMetricsFactory.ZONE, serverGroup.getZone())); } timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.OVERALL_CALL, tagList, Duration.ofMillis(duration)); timerMap.put(name, timer); return timer; } protected List<Tag> getTags() { return tags; } }
4,001
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheKey.java
package com.netflix.evcache; import java.util.HashMap; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.archaius.api.Property; import com.netflix.evcache.util.KeyHasher; import com.netflix.evcache.util.KeyHasher.HashingAlgorithm; public class EVCacheKey { private static final Logger log = LoggerFactory.getLogger(EVCacheKey.class); private final String appName; private final HashingAlgorithm hashingAlgorithmAtAppLevel; private final Property<Boolean> shouldEncodeHashKeyAtAppLevel; private final Property<Integer> maxDigestBytesAtAppLevel; private final Property<Integer> maxHashLengthAtAppLevel; private final String key; private final String canonicalKey; private String canonicalKeyForDuet; // Note that this we cache hashed keys based on Hashing Algorithm alone, but not based on other hashing properties // like max.hash.length. So changing max.hash.length alone would not necessarily trigger hash recalculation, but // one would have to change the hashing algorithm in order to having hashing properties taken into account. // This is to make such a hashing property change very obvious and not subtle. 
private final Map<String, String> hashedKeysByAlgorithm; private final Map<String, String> hashedKeysByAlgorithmForDuet; private final String encoder; public EVCacheKey(String appName, String key, String canonicalKey, HashingAlgorithm hashingAlgorithmAtAppLevel, Property<Boolean> shouldEncodeHashKeyAtAppLevel, Property<Integer> maxDigestBytesAtAppLevel, Property<Integer> maxHashLengthAtAppLevel) { this(appName, key, canonicalKey, hashingAlgorithmAtAppLevel, shouldEncodeHashKeyAtAppLevel, maxDigestBytesAtAppLevel, maxHashLengthAtAppLevel, null); } public EVCacheKey(String appName, String key, String canonicalKey, HashingAlgorithm hashingAlgorithmAtAppLevel, Property<Boolean> shouldEncodeHashKeyAtAppLevel, Property<Integer> maxDigestBytesAtAppLevel, Property<Integer> maxHashLengthAtAppLevel, String encoder) { super(); this.appName = appName; this.key = key; this.canonicalKey = canonicalKey; this.hashingAlgorithmAtAppLevel = hashingAlgorithmAtAppLevel; this.shouldEncodeHashKeyAtAppLevel = shouldEncodeHashKeyAtAppLevel; this.maxDigestBytesAtAppLevel = maxDigestBytesAtAppLevel; this.maxHashLengthAtAppLevel = maxHashLengthAtAppLevel; this.encoder = encoder; hashedKeysByAlgorithm = new HashMap<>(); hashedKeysByAlgorithmForDuet = new HashMap<>(); } public String getKey() { return key; } @Deprecated public String getCanonicalKey() { return canonicalKey; } public String getCanonicalKey(boolean isDuet) { return isDuet ? getCanonicalKeyForDuet() : canonicalKey; } private String getCanonicalKeyForDuet() { if (null == canonicalKeyForDuet) { final int duetKeyLength = appName.length() + 1 + canonicalKey.length(); canonicalKeyForDuet = new StringBuilder(duetKeyLength).append(appName).append(':').append(canonicalKey).toString(); if (log.isDebugEnabled()) log.debug("canonicalKeyForDuet : " + canonicalKeyForDuet); } return canonicalKeyForDuet; } @Deprecated public String getHashKey() { return getHashKey(hashingAlgorithmAtAppLevel, null == shouldEncodeHashKeyAtAppLevel ? 
null : shouldEncodeHashKeyAtAppLevel.get(), null == maxDigestBytesAtAppLevel ? null : maxDigestBytesAtAppLevel.get(), null == maxHashLengthAtAppLevel ? null : maxHashLengthAtAppLevel.get(), encoder); } // overlays app level hashing and client level hashing public String getHashKey(boolean isDuet, HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String baseEnoder) { if (hashingAlgorithm == HashingAlgorithm.NO_HASHING) { return null; } if (null == hashingAlgorithm) { hashingAlgorithm = hashingAlgorithmAtAppLevel; } if (null == shouldEncodeHashKey) { shouldEncodeHashKey = this.shouldEncodeHashKeyAtAppLevel.get(); } if (null == maxDigestBytes) { maxDigestBytes = this.maxDigestBytesAtAppLevel.get(); } if (null == maxHashLength) { maxHashLength = this.maxHashLengthAtAppLevel.get(); } if(null == baseEnoder) { baseEnoder = encoder; } final String rKey = isDuet ? getHashKeyForDuet(hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEnoder) : getHashKey(hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEnoder); if (log.isDebugEnabled()) log.debug("Key : " + rKey); return rKey; } // overlays app level hashing algorithm and client level hashing algorithm public String getDerivedKey(boolean isDuet, HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String baseEnoder) { // this overlay of hashingAlgorithm helps determine if there at all needs to be hashing performed, otherwise, will return canonical key if (null == hashingAlgorithm) { hashingAlgorithm = hashingAlgorithmAtAppLevel; } final String derivedKey = null == hashingAlgorithm || hashingAlgorithm == HashingAlgorithm.NO_HASHING ? 
getCanonicalKey(isDuet) : getHashKey(isDuet, hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEnoder); if (log.isDebugEnabled()) log.debug("derivedKey : " + derivedKey); return derivedKey; } private String getHashKey(HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String encoder) { if (null == hashingAlgorithm) { return null; } final String key = hashingAlgorithm.toString()+ maxDigestBytes != null ? maxDigestBytes.toString() : "-" + maxHashLength != null ? maxHashLength.toString() : "-" + encoder != null ? encoder : "-"; String val = hashedKeysByAlgorithm.get(key); if(val == null) { val = KeyHasher.getHashedKeyEncoded(getCanonicalKey(false), hashingAlgorithm, maxDigestBytes, maxHashLength, encoder); hashedKeysByAlgorithm.put(key , val); } if (log.isDebugEnabled()) log.debug("getHashKey : " + val); // TODO: Once the issue around passing hashedKey in bytes[] is figured, we will start using (nullable) shouldEncodeHashKey, and call KeyHasher.getHashedKeyInBytes() accordingly return val; } private String getHashKeyForDuet(HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String encoder) { if (null == hashingAlgorithm) { return null; } final String key = hashingAlgorithm.toString()+ maxDigestBytes != null ? maxDigestBytes.toString() : "-" + maxHashLength != null ? maxHashLength.toString() : "-" + encoder != null ? 
encoder : "-"; String val = hashedKeysByAlgorithmForDuet.get(key); if(val == null) { val = KeyHasher.getHashedKeyEncoded(getCanonicalKeyForDuet(), hashingAlgorithm, maxDigestBytes, maxHashLength, encoder); hashedKeysByAlgorithmForDuet.put(key , val); } if (log.isDebugEnabled()) log.debug("getHashKeyForDuet : " + val); // TODO: Once the issue around passing hashedKey in bytes[] is figured, we will start using (nullable) shouldEncodeHashKey, and call KeyHasher.getHashedKeyInBytes() accordingly return val; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((canonicalKey == null) ? 0 : canonicalKey.hashCode()); result = prime * result + ((canonicalKeyForDuet == null) ? 0 : canonicalKeyForDuet.hashCode()); result = prime * result + ((key == null) ? 0 : key.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; EVCacheKey other = (EVCacheKey) obj; if (canonicalKey == null) { if (other.canonicalKey != null) return false; } else if (!canonicalKey.equals(other.canonicalKey)) return false; if (canonicalKeyForDuet == null) { if (other.canonicalKeyForDuet != null) return false; } else if (!canonicalKeyForDuet.equals(other.canonicalKeyForDuet)) return false; if (key == null) { if (other.key != null) return false; } else if (!key.equals(other.key)) return false; return true; } @Override public String toString() { return "EVCacheKey [key=" + key + ", canonicalKey=" + canonicalKey + ", canonicalKeyForDuet=" + canonicalKeyForDuet + (hashedKeysByAlgorithm.size() > 0 ? ", hashedKeysByAlgorithm=" + hashedKeysByAlgorithm.toString() : "") + (hashedKeysByAlgorithmForDuet.size() > 0 ? ", hashedKeysByAlgorithmForDuet=" + hashedKeysByAlgorithmForDuet.toString() + "]" : "]"); } }
4,002
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheClientPoolConfigurationProperties.java
package com.netflix.evcache; import java.time.Duration; public class EVCacheClientPoolConfigurationProperties { /** * Prefix to be applied to keys. */ private String keyPrefix; /** * Time-to-live in seconds. */ private Duration timeToLive; /** * Whether or not retry is to be enabled. */ private Boolean retryEnabled = true; /** * Whether or not exception throwing is to be enabled. */ private Boolean exceptionThrowingEnabled = false; public EVCacheClientPoolConfigurationProperties() { this.keyPrefix = ""; this.timeToLive = Duration.ofSeconds(900); this.retryEnabled = true; this.exceptionThrowingEnabled = false; } public String getKeyPrefix() { return keyPrefix; } public void setKeyPrefix(String keyPrefix) { this.keyPrefix = keyPrefix; } public Duration getTimeToLive() { return timeToLive; } public void setTimeToLive(Duration timeToLive) { this.timeToLive = timeToLive; } public Boolean getRetryEnabled() { return retryEnabled; } public void setRetryEnabled(Boolean retryEnabled) { this.retryEnabled = retryEnabled; } public Boolean getExceptionThrowingEnabled() { return exceptionThrowingEnabled; } public void setExceptionThrowingEnabled(Boolean exceptionThrowingEnabled) { this.exceptionThrowingEnabled = exceptionThrowingEnabled; } }
4,003
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheException.java
package com.netflix.evcache; public class EVCacheException extends Exception { private static final long serialVersionUID = -3885811159646046383L; public EVCacheException(String message) { super(message); } public EVCacheException(String message, Throwable cause) { super(message, cause); } }
4,004
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheLatch.java
package com.netflix.evcache;

import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import com.netflix.evcache.event.EVCacheEvent;

import net.spy.memcached.internal.OperationCompletionListener;

/**
 * EVCacheLatch is a blocking mechanism that allows one or more threads to wait until
 * a set of operations, as selected by {@link Policy}, performed by EVCache threads are complete.
 *
 * <p>The latch is initialized with a <em>count</em> determined by the Policy. The
 * {@link #await await} method blocks until the count reaches zero due to completion
 * of the operations, after which all waiting threads are released and any subsequent
 * invocations of {@link #await await} return immediately.
 *
 * <p>The latch is also released when the specified timeout elapses even though the
 * count is still greater than zero; in that case {@link #await await} returns false.
 *
 * <p>The query methods below may be called at any time; they report the current
 * state of the operations across the backing Futures.
 */
public interface EVCacheLatch extends OperationCompletionListener {

    /**
     * Controls how many of the per-copy operations must complete before the latch is
     * released. For example, if an EVCache app has 3 copies (3 server groups) in a
     * region, each write performs 3 operations (one per copy). With policy
     * {@code ALL_MINUS_1}, 2 of those 3 operations must finish before the latch opens.
     *
     * <p>"Completed" means the operation was accepted or rejected by EVCache; an
     * operation still in flight is pending.
     *
     * <ul>
     * <li>{@code ALL} — every operation must complete.</li>
     * <li>{@code ALL_MINUS_1} — all but one must complete (2 of 3 copies).</li>
     * <li>{@code QUORUM} — a quorum must complete (2 of 3 copies).</li>
     * <li>{@code ONE} — at least one must complete.</li>
     * <li>{@code NONE} — the latch is released immediately.</li>
     * </ul>
     *
     * @author smadappa
     */
    public static enum Policy {
        NONE, ONE, QUORUM, ALL_MINUS_1, ALL
    }

    /**
     * Causes the current thread to wait until the latch has counted down to zero,
     * unless the thread is interrupted or the specified waiting time elapses.
     *
     * @param timeout the maximum time to wait
     * @param unit the time unit of the timeout argument
     * @return {@code true} if the count reached zero; {@code false} if the waiting
     *         time elapsed before the count reached zero
     * @throws InterruptedException if the current thread is interrupted while waiting
     */
    boolean await(long timeout, TimeUnit unit) throws InterruptedException;

    /**
     * Returns {@code true} if all the tasks assigned to this latch completed.
     * Completion may be due to normal termination, an exception, or cancellation —
     * in all of these cases this method returns {@code true}.
     *
     * @return {@code true} if all the tasks completed
     */
    boolean isDone();

    /**
     * Returns the Futures backing the pending tasks.
     *
     * @return the currently outstanding tasks
     */
    List<Future<Boolean>> getPendingFutures();

    /**
     * Returns all the tasks.
     *
     * @return the tasks submitted as part of this latch
     */
    List<Future<Boolean>> getAllFutures();

    /**
     * Returns all the completed tasks.
     *
     * @return the currently completed tasks
     */
    List<Future<Boolean>> getCompletedFutures();

    /**
     * Returns the number of Futures that are still pending.
     *
     * @return the currently outstanding Future task count
     */
    int getPendingFutureCount();

    /**
     * Returns the number of Future tasks that are completed.
     *
     * @return the currently completed Future task count
     */
    int getCompletedFutureCount();

    /**
     * Returns the number of tasks that are still pending.
     *
     * @return the currently outstanding task count
     */
    int getPendingCount();

    /**
     * Returns the number of tasks that completed, whether with success or failure
     * (a task counts as failed if it timed out or threw an exception).
     *
     * @return the completed task count
     */
    int getCompletedCount();

    /**
     * Returns the number of tasks that failed to complete — either an exception was
     * thrown or the task was cancelled.
     *
     * @return the failed task count
     */
    int getFailureCount();

    /**
     * Returns the number of tasks that must complete successfully, per the specified
     * {@link Policy}, before the latch can be released.
     *
     * @return the expected success count
     * @deprecated replaced by {@link #getExpectedCompleteCount()}
     */
    int getExpectedSuccessCount();

    /**
     * Returns the number of tasks that must complete, per the specified
     * {@link Policy}, before the latch can be released.
     *
     * @return the expected complete count
     */
    int getExpectedCompleteCount();

    /**
     * Returns the current number of tasks that completed successfully.
     *
     * @return the current successful task count
     */
    int getSuccessCount();

    /**
     * The {@link Policy} governing this latch.
     *
     * @return the policy
     */
    Policy getPolicy();

    /**
     * Returns {@code true} if the operation failed fast, i.e. it was never performed.
     *
     * @return {@code true} upon fast failure, else {@code false}
     */
    boolean isFastFailure();

    /**
     * Associates an event with this latch.
     *
     * @param event the EVCacheEvent to associate with this latch (may be null)
     */
    void setEVCacheEvent(EVCacheEvent event);
}
4,005
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheConnectException.java
package com.netflix.evcache; import java.io.IOException; public class EVCacheConnectException extends IOException { private static final long serialVersionUID = 8065483548278456469L; public EVCacheConnectException(String message) { super(message); } public EVCacheConnectException(String message, Throwable cause) { super(message, cause); } }
4,006
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheGetOperationListener.java
package com.netflix.evcache;

import com.netflix.evcache.operation.EVCacheOperationFuture;

import net.spy.memcached.internal.GenericCompletionListener;

/**
 * Listener notified when an asynchronous EVCache get operation completes.
 * Marker specialization of {@link GenericCompletionListener} bound to
 * {@link EVCacheOperationFuture}; adds no methods of its own.
 *
 * @param <T> the type of the value produced by the get operation
 */
public interface EVCacheGetOperationListener<T> extends GenericCompletionListener<EVCacheOperationFuture<T>> {
}
4,007
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheMissException.java
package com.netflix.evcache;

/**
 * Thrown when a requested key is not present in EVCache and the client is
 * configured to surface misses as exceptions.
 */
public class EVCacheMissException extends EVCacheException {

    private static final long serialVersionUID = 222337840463312890L;

    /**
     * @param message description of the miss
     */
    public EVCacheMissException(String message) {
        super(message);
    }

    /**
     * @param message description of the miss
     * @param cause underlying cause
     */
    public EVCacheMissException(String message, Throwable cause) {
        super(message, cause);
    }
}
4,008
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheInMemoryCache.java
package com.netflix.evcache;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

import com.netflix.archaius.api.Property;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheStats;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListenableFutureTask;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Tag;

import net.spy.memcached.transcoders.Transcoder;

/**
 * An in-memory cache that can be used to hold data for a short duration. This is
 * helpful when the same key is repeatedly requested from EVCache within a short
 * duration. It can be turned on dynamically and can relieve pressure on EVCache
 * server instances.
 *
 * <p>Built on a Guava {@code LoadingCache} keyed by {@link EVCacheKey}; misses are
 * loaded through {@code impl.doGet}. Every sizing/expiry property is dynamic: a
 * property change rebuilds the whole cache (see {@link #setupCache()}).
 */
public class EVCacheInMemoryCache<T> {
    private static final Logger log = LoggerFactory.getLogger(EVCacheInMemoryCache.class);
    private final Property<Integer> _cacheDuration; // The key will be cached for this long (expire-after-write, ms)
    // NOTE: "_exireAfterAccessDuration" is a typo for "expire" kept for source compatibility.
    private final Property<Integer> _refreshDuration, _exireAfterAccessDuration;
    private final Property<Integer> _cacheSize; // This many items will be cached (0 or less = unbounded)
    private final Property<Integer> _poolSize; // This many threads will be initialized to fetch data from evcache async
    private final String appName;
    // Memoized Spectator meters, one per metric name.
    private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
    private final Map<String, Gauge> gaugeMap = new ConcurrentHashMap<String, Gauge>();
    // Replaced wholesale by setupCache(); reads are unsynchronized by design.
    private LoadingCache<EVCacheKey, Optional<T>> cache;
    private ExecutorService pool = null;
    private final Transcoder<T> tc;
    private final EVCacheImpl impl;
    private final Id sizeId;

    /**
     * Creates the in-memory front cache for the given app.
     *
     * @param appName EVCache app name; used as the prefix for all dynamic properties
     * @param tc transcoder passed through to {@code impl.doGet} on a load
     * @param impl backing EVCache implementation used to load missing keys
     */
    public EVCacheInMemoryCache(String appName, Transcoder<T> tc, EVCacheImpl impl) {
        this.appName = appName;
        this.tc = tc;
        this.impl = impl;

        // Each property change triggers a full cache rebuild (or pool rebuild for pool size).
        this._cacheDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.expire.after.write.duration.ms", Integer.class).orElseGet(appName + ".inmemory.cache.duration.ms").orElse(0);
        this._cacheDuration.subscribe((i) -> setupCache());

        this._exireAfterAccessDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.expire.after.access.duration.ms", Integer.class).orElse(0);
        this._exireAfterAccessDuration.subscribe((i) -> setupCache());;

        this._refreshDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.refresh.after.write.duration.ms", Integer.class).orElse(0);
        this._refreshDuration.subscribe((i) -> setupCache());

        this._cacheSize = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.cache.size", Integer.class).orElse(100);
        this._cacheSize.subscribe((i) -> setupCache());

        this._poolSize = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".thread.pool.size", Integer.class).orElse(5);
        this._poolSize.subscribe((i) -> initRefreshPool());

        final List<Tag> tags = new ArrayList<Tag>(3);
        tags.addAll(impl.getTags());
        tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, "size"));
        this.sizeId = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.IN_MEMORY, tags);
        setupCache();
        setupMonitoring(appName);
    }

    // Guards pool replacement in initRefreshPool(); reads of 'pool' elsewhere are unguarded.
    private WriteLock writeLock = new ReentrantReadWriteLock().writeLock();

    /**
     * (Re)creates the async refresh pool at the current {@code _poolSize} and shuts
     * down the previous pool, letting in-flight refresh tasks drain.
     */
    private void initRefreshPool() {
        final ExecutorService oldPool = pool;
        writeLock.lock();
        try {
            final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(
                    "EVCacheInMemoryCache-%d").build();
            pool = Executors.newFixedThreadPool(_poolSize.get(), factory);
            if(oldPool != null) oldPool.shutdown();
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Builds a fresh LoadingCache from the current property values, copies the live
     * entries from the old cache into it, swaps it in, then invalidates the old one.
     * Called at construction and again on every relevant property change.
     */
    private void setupCache() {
        try {
            CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder().recordStats();
            if(_cacheSize.get() > 0) {
                builder = builder.maximumSize(_cacheSize.get());
            }
            // expire-after-access takes precedence over expire-after-write when both are set.
            if(_exireAfterAccessDuration.get() > 0) {
                builder = builder.expireAfterAccess(_exireAfterAccessDuration.get(), TimeUnit.MILLISECONDS);
            } else if(_cacheDuration.get().intValue() > 0) {
                builder = builder.expireAfterWrite(_cacheDuration.get(), TimeUnit.MILLISECONDS);
            }
            if(_refreshDuration.get() > 0) {
                builder = builder.refreshAfterWrite(_refreshDuration.get(), TimeUnit.MILLISECONDS);
            }
            initRefreshPool();
            final LoadingCache<EVCacheKey, Optional<T>> newCache = builder.build(
                    new CacheLoader<EVCacheKey, Optional<T>>() {
                        // Synchronous load on a miss: delegate to EVCache; absent values are
                        // cached as Optional.absent() so repeated misses don't hammer the server.
                        public Optional<T> load(EVCacheKey key) throws EVCacheException, DataNotFoundException {
                            try {
                                return Optional.fromNullable(impl.doGet(key, tc));
                            } catch (EVCacheException e) {
                                log.error("EVCacheException while loading key -> "+ key, e);
                                throw e;
                            } catch (Exception e) {
                                log.error("EVCacheException while loading key -> "+ key, e);
                                throw new EVCacheException("key : " + key + " could not be loaded", e);
                            }
                        }

                        // Async refresh on refreshAfterWrite: runs load() on the refresh pool and
                        // keeps serving the old value if the reload fails or finds nothing.
                        @Override
                        public ListenableFuture<Optional<T>> reload(EVCacheKey key, Optional<T> oldValue) {
                            ListenableFutureTask<Optional<T>> task = ListenableFutureTask.create(new Callable<Optional<T>>() {
                                public Optional<T> call() {
                                    try {
                                        final Optional<T> t = load(key);
                                        if(t == null) {
                                            EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-NotFound");
                                            return oldValue;
                                        } else {
                                            EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-Success");
                                        }
                                        return t;
                                    } catch (EVCacheException e) {
                                        log.error("EVCacheException while reloading key -> "+ key, e);
                                        EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-Fail");
                                        return oldValue;
                                    }
                                }
                            });
                            pool.execute(task);
                            return task;
                        }
                    });
            // Migrate current entries, then swap and retire the old cache.
            if(cache != null) newCache.putAll(cache.asMap());
            final Cache<EVCacheKey, Optional<T>> currentCache = this.cache;
            this.cache = newCache;
            if(currentCache != null) {
                currentCache.invalidateAll();
                currentCache.cleanUp();
            }
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
    }

    // Snapshot of the last reported stats; getSize() reports deltas against it.
    private CacheStats previousStats = null;

    /**
     * Returns the current entry count; as a side effect, publishes the delta of
     * Guava cache stats since the previous call (Guava stats are cumulative, so the
     * subtraction converts them into per-interval counter increments).
     */
    private long getSize() {
        final long size = cache.size();
        final CacheStats stats = cache.stats();
        if(previousStats != null) {
            try {
                getCounter("hits").increment(stats.hitCount() - previousStats.hitCount());
                getCounter("miss").increment(stats.missCount() - previousStats.missCount());
                getCounter("evictions").increment(stats.evictionCount() - previousStats.evictionCount());
                getCounter("requests").increment(stats.requestCount() - previousStats.requestCount());
                getCounter("loadExceptionCount").increment(stats.loadExceptionCount() - previousStats.loadExceptionCount());
                getCounter("loadCount").increment(stats.loadCount() - previousStats.loadCount());
                getCounter("loadSuccessCount").increment(stats.loadSuccessCount() - previousStats.loadSuccessCount());
                getCounter("totalLoadTime-ms").increment(( stats.totalLoadTime() - previousStats.totalLoadTime())/1000000); // nanos -> ms
                getGauge("hitrate").set(stats.hitRate());
                getGauge("loadExceptionRate").set(stats.loadExceptionRate());
                getGauge("averageLoadTime-ms").set(stats.averageLoadPenalty()/1000000); // nanos -> ms
            } catch(Exception e) {
                log.error("Error while reporting stats", e);
            }
        }
        previousStats = stats;
        return size;
    }

    // Registers the size gauge; polling it drives the stats reporting in getSize().
    @SuppressWarnings("deprecation")
    private void setupMonitoring(final String appName) {
        EVCacheMetricsFactory.getInstance().getRegistry().gauge(sizeId, this, EVCacheInMemoryCache::getSize);
    }

    // Memoized counter lookup for the given metric name.
    private Counter getCounter(String name) {
        Counter counter = counterMap.get(name);
        if(counter != null) return counter;
        final List<Tag> tags = new ArrayList<Tag>(3);
        tags.addAll(impl.getTags());
        tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, name));
        counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.IN_MEMORY, tags);
        counterMap.put(name, counter);
        return counter;
    }

    // Memoized gauge lookup for the given metric name.
    private Gauge getGauge(String name) {
        Gauge gauge = gaugeMap.get(name);
        if(gauge != null) return gauge;
        final List<Tag> tags = new ArrayList<Tag>(3);
        tags.addAll(impl.getTags());
        tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, name));
        final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.IN_MEMORY, tags);
        gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id);
        gaugeMap.put(name, gauge);
        return gauge;
    }

    /**
     * Returns the cached value for the key, loading it through EVCache on a miss.
     * Returns null when the backing store has no value (the absence itself is cached).
     *
     * @throws ExecutionException if the loader threw while fetching the value
     */
    public T get(EVCacheKey key) throws ExecutionException {
        if (cache == null) return null;
        final Optional<T> val = cache.get(key);
        if(!val.isPresent()) return null;
        if (log.isDebugEnabled()) log.debug("GET : appName : " + appName + "; Key : " + key + "; val : " + val);
        return val.get();
    }

    /** Stores the value (null allowed; stored as absent) under the key. */
    public void put(EVCacheKey key, T value) {
        if (cache == null) return;
        cache.put(key, Optional.fromNullable(value));
        if (log.isDebugEnabled()) log.debug("PUT : appName : " + appName + "; Key : " + key + "; val : " + value);
    }

    /**
     * Removes the entry for the given key.
     *
     * <p>NOTE(review): the cache is keyed by {@link EVCacheKey}, but this method
     * invalidates with a raw {@code String}; a String never equals an EVCacheKey,
     * so this looks like a no-op — confirm against callers whether an EVCacheKey
     * was intended here.
     */
    public void delete(String key) {
        if (cache == null) return;
        cache.invalidate(key);
        if (log.isDebugEnabled()) log.debug("DEL : appName : " + appName + "; Key : " + key);
    }

    /** Returns a live view of all cached entries (empty map before the cache is built). */
    public Map<EVCacheKey, Optional<T>> getAll() {
        if (cache == null) return Collections.<EVCacheKey, Optional<T>>emptyMap();
        return cache.asMap();
    }

    /** Thrown by the loader when a key cannot be found. */
    public static final class DataNotFoundException extends EVCacheException {
        private static final long serialVersionUID = 1800185311509130263L;

        public DataNotFoundException(String message) {
            super(message);
        }
    }
}
4,009
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheInternal.java
package com.netflix.evcache;

import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.transcoders.Transcoder;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;

/**
 * Internal-use extension of {@link EVCache} exposing meta-protocol debugging
 * operations, per-client fan-out reads, and targeted add-or-set writes.
 * Not intended for application code — use {@link EVCache} / {@code EVCacheImpl}.
 */
public interface EVCacheInternal extends EVCache {

    /** Fetches the item plus its meta data; {@code isOriginalKeyHashed} skips re-hashing the key. */
    EVCacheItem<CachedData> metaGet(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException;

    /** Fetches the item from every client, keyed by each client's primary node for the key. */
    Map<MemcachedNode, CachedValues> metaGetPerClient(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException;

    /** Returns meta-debug information for the key from one client. */
    EVCacheItemMetaData metaDebug(String key, boolean isOriginalKeyHashed) throws EVCacheException;

    /** Returns meta-debug information for the key from every client. */
    Map<MemcachedNode, EVCacheItemMetaData> metaDebugPerClient(String key, boolean isOriginalKeyHashed) throws EVCacheException;

    /** Deletes the key from all copies; one Future per copy. */
    Future<Boolean>[] delete(String key, boolean isOriginalKeyHashed) throws EVCacheException;

    /** Adds ({@code replaceItem=false}) or sets the value only on write-only server groups. */
    EVCacheLatch addOrSetToWriteOnly(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException;

    /** Adds or sets the value on the named server groups. */
    EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups) throws EVCacheException;

    /** Adds or sets the value on a single named server group. */
    EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroup) throws EVCacheException;

    /** Adds or sets the value on a server group, restricted to the given destination IPs. */
    EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName, List<String> destinationIps) throws EVCacheException;

    /** Reports whether keys for the app/server group are stored hashed. */
    KeyHashedState isKeyHashed(String appName, String serverGroup);

    /** Tri-state answer for {@link #isKeyHashed(String, String)}. */
    public enum KeyHashedState {
        YES, NO, MAYBE
    }

    /** Value object pairing a key with its raw cached data and meta data. */
    public static class CachedValues {
        private final String key;
        private final CachedData data;
        private EVCacheItemMetaData itemMetaData;

        public CachedValues(String key, CachedData data, EVCacheItemMetaData itemMetaData) {
            this.key = key;
            this.data = data;
            this.itemMetaData = itemMetaData;
        }

        public String getKey() {
            return key;
        }

        public CachedData getData() {
            return data;
        }

        public EVCacheItemMetaData getEVCacheItemMetaData() {
            return itemMetaData;
        }
    }

    /** Builder producing {@code EVCacheInternalImpl} instances instead of the public impl. */
    public class Builder extends EVCache.Builder {
        public Builder() {
            super();
        }

        @Override
        protected EVCache newImpl(String appName, String cachePrefix, int ttl, Transcoder<?> transcoder, boolean serverGroupRetry, boolean enableExceptionThrowing, EVCacheClientPoolManager poolManager) {
            return new EVCacheInternalImpl(appName, cachePrefix, ttl, transcoder, serverGroupRetry, enableExceptionThrowing, poolManager);
        }
    }
}
4,010
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheTranscoder.java
package com.netflix.evcache; import com.netflix.evcache.util.EVCacheConfig; import net.spy.memcached.CachedData; public class EVCacheTranscoder extends EVCacheSerializingTranscoder { public EVCacheTranscoder() { this(EVCacheConfig.getInstance().getPropertyRepository().get("default.evcache.max.data.size", Integer.class).orElse(20 * 1024 * 1024).get()); } public EVCacheTranscoder(int max) { this(max, EVCacheConfig.getInstance().getPropertyRepository().get("default.evcache.compression.threshold", Integer.class).orElse(120).get()); } public EVCacheTranscoder(int max, int compressionThreshold) { super(max); setCompressionThreshold(compressionThreshold); } @Override public boolean asyncDecode(CachedData d) { return super.asyncDecode(d); } @Override public Object decode(CachedData d) { return super.decode(d); } @Override public CachedData encode(Object o) { if (o != null && o instanceof CachedData) return (CachedData) o; return super.encode(o); } }
4,011
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheInternalImpl.java
package com.netflix.evcache;

import com.netflix.archaius.api.PropertyRepository;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheValue;
import com.netflix.evcache.pool.ServerGroup;
import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.transcoders.Transcoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

/**
 * This class is for internal-use only by EVCache components, and is not recommended to be used for any other purpose. EVCache and EVCacheImpl are recommended instead.
 */
class EVCacheInternalImpl extends EVCacheImpl implements EVCacheInternal {

    private static final Logger log = LoggerFactory.getLogger(EVCacheInternalImpl.class);

    /** Fetches the raw value plus metadata for the key; delegates to the superclass meta-get. */
    public EVCacheItem<CachedData> metaGet(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException {
        return this.metaGetInternal(key, tc, isOriginalKeyHashed);
    }

    /**
     * Fetches the value for {@code key} from every client across all server
     * groups, keyed by each client's primary node for the key.
     *
     * <p>Fallback behavior: if some clients missed while others hit, the hit
     * values are decoded looking for an {@link EVCacheValue} wrapper that
     * carries the original (pre-hash) key; the missing clients are then
     * re-queried with that original key. If every client missed, the map is
     * populated with null values so callers still see one entry per node.
     *
     * @return map of primary node to cached value (null value = miss)
     */
    public Map<MemcachedNode, CachedValues> metaGetPerClient(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException {
        Map<MemcachedNode, CachedValues> map = new HashMap<>();
        final Map<ServerGroup, List<EVCacheClient>> instancesByZone = _pool.getAllInstancesByZone();
        // clients that returned no data; candidates for the original-key retry below
        final Map<ServerGroup, EVCacheClient> instancesWithNull = new HashMap<ServerGroup, EVCacheClient>();
        final EVCacheKey evcKey = getEVCacheKey(key);
        for (ServerGroup sGroup : instancesByZone.keySet()) {
            try {
                for (EVCacheClient client : instancesByZone.get(sGroup)) {
                    EVCacheItem<CachedData> item = getEVCacheItem(client, evcKey, tc, true, false, isOriginalKeyHashed, false);
                    if (log.isDebugEnabled()) log.debug("client : " + client + "; item : " + item);
                    if(item == null) {
                        instancesWithNull.put(sGroup, client);
                    } else {
                        // item is known non-null here; the ternary is defensive
                        map.put(client.getNodeLocator().getPrimary(key), null == item ? null : new CachedValues(key, item.getData(), item.getItemMetaData()));
                    }
                }
            } catch (Exception e) {
                log.error("Error getting meta data", e);
            }
        }
        if (log.isDebugEnabled()) log.debug("map : " + map);
        if (log.isDebugEnabled()) log.debug("instancesWithNull : " + instancesWithNull);
        if(instancesWithNull.size() > 0 && map.size() > 0) {
            // Partial hit: try to recover the original (unhashed) key from one
            // of the hits, then re-query the missing clients with it.
            final EVCacheTranscoder transcoder = new EVCacheTranscoder();
            String originalKey = null;
            for(CachedValues vals : map.values()) {
                if (log.isDebugEnabled()) log.debug("vals : " + vals);
                try {
                    Object obj = transcoder.decode(vals.getData());
                    if (log.isDebugEnabled()) log.debug("Obj : " + obj);
                    if(obj instanceof EVCacheValue) {
                        originalKey = ((EVCacheValue)obj).getKey();
                        if (log.isDebugEnabled()) log.debug("original key: " + originalKey);
                        break;
                    }
                } catch(Exception e) {
                    log.error("Exception decoding", e);
                }
            }
            if(originalKey != null) {
                for(ServerGroup sGroup : instancesWithNull.keySet()) {
                    if (log.isDebugEnabled()) log.debug("sGroup : " + sGroup);
                    final EVCacheClient client = instancesWithNull.get(sGroup);
                    if (log.isDebugEnabled()) log.debug("Client : " + client);
                    EVCacheItem<CachedData> item;
                    try {
                        // isOriginalKeyHashed=false here: originalKey is the pre-hash key
                        item = getEVCacheItem(client, getEVCacheKey(originalKey), tc, true, false, false, false);
                        if (log.isDebugEnabled()) log.debug("item : " + item);
                        // NOTE(review): CachedValues is built with the caller's key, not
                        // originalKey, while the node is located by originalKey — presumably
                        // intentional so results stay keyed by the requested key; confirm.
                        map.put(client.getNodeLocator().getPrimary(originalKey), null == item ? null : new CachedValues(key, item.getData(), item.getItemMetaData()));
                    } catch (Exception e) {
                        log.error("Exception getting meta data using original key - " + originalKey, e);
                    }
                }
            }
        } else if(map.size() == 0) {
            // Total miss: still return an entry per client so callers can see
            // which nodes were consulted.
            for (ServerGroup sGroup : instancesByZone.keySet()) {
                try {
                    for (EVCacheClient client : instancesByZone.get(sGroup)) {
                        map.put(client.getNodeLocator().getPrimary(key), null);
                    }
                } catch (Exception e) {
                    log.error("Error getting meta data", e);
                }
            }
        }
        if (log.isDebugEnabled()) log.debug("return map : " + map);
        return map;
    }

    /** Fetches only the item metadata (meta-debug) for the key. */
    public EVCacheItemMetaData metaDebug(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        return this.metaDebugInternal(key, isOriginalKeyHashed);
    }

    /**
     * Fetches item metadata from every client across all server groups,
     * keyed by each client's primary node for the key.
     */
    public Map<MemcachedNode, EVCacheItemMetaData> metaDebugPerClient(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        Map<MemcachedNode, EVCacheItemMetaData> map = new HashMap<>();
        final Map<ServerGroup, List<EVCacheClient>> instancesByZone = _pool.getAllInstancesByZone();
        final EVCacheKey evcKey = getEVCacheKey(key);
        for (ServerGroup sGroup : instancesByZone.keySet()) {
            try {
                for (EVCacheClient client : instancesByZone.get(sGroup)) {
                    EVCacheItemMetaData itemMetaData = getEVCacheItemMetaData(client, evcKey, true, false, isOriginalKeyHashed);
                    map.put(client.getNodeLocator().getPrimary(key), itemMetaData);
                }
            } catch (Exception e) {
                log.error("Error getting meta data", e);
            }
        }
        return map;
    }

    /** Deletes the key across replicas; delegates to the superclass delete. */
    public Future<Boolean>[] delete(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        return this.deleteInternal(key, isOriginalKeyHashed);
    }

    public EVCacheInternalImpl(String appName, String cacheName, int timeToLive, Transcoder<?> transcoder, boolean enableZoneFallback, boolean throwException, EVCacheClientPoolManager poolManager) {
        super(appName, cacheName, timeToLive, transcoder, enableZoneFallback, throwException, poolManager);
    }

    /**
     * Writes only to write-only clients: set when {@code replaceItem},
     * otherwise add (fixup disabled so add cannot replace an existing item).
     */
    public EVCacheLatch addOrSetToWriteOnly(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException {
        EVCacheClient[] clients = _pool.getWriteOnlyEVCacheClients();
        if (replaceItem) return set(key, value, null, timeToLive, policy, clients, 0);
        else return add(key, value, null, timeToLive, policy, clients, 0, false);
    }

    /** Writes to all clients of the named server groups (no IP restriction). */
    public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups) throws EVCacheException {
        return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroups, null);
    }

    /** Writes to all clients of a single named server group (no IP restriction). */
    public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName) throws EVCacheException {
        return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroupName, null);
    }

    /** Wraps the single server-group name in a list and delegates. */
    public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName, List<String> destinationIps) throws EVCacheException {
        List<String> serverGroups = new ArrayList<>();
        serverGroups.add(serverGroupName);
        return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroups, destinationIps);
    }

    /**
     * Core add-or-set: selects clients whose server group is in
     * {@code serverGroups}, optionally narrowed to clients whose primary node
     * for this key resolves to one of {@code destinationIps}, then issues a
     * set or an add (add with fixup disabled — see inline comment).
     */
    private EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups, List<String> destinationIps) throws EVCacheException {
        Map<ServerGroup, List<EVCacheClient>> clientsByServerGroup = _pool.getAllInstancesByZone();
        List<EVCacheClient> evCacheClients = clientsByServerGroup.entrySet().stream()
                .filter(entry -> serverGroups.contains(entry.getKey().getName()))
                .map(Map.Entry::getValue)
                .flatMap(List::stream)
                .collect(Collectors.toList());
        if (null != destinationIps && !destinationIps.isEmpty()) {
            // identify that evcache client whose primary node is the destination ip for the key being processed
            evCacheClients = evCacheClients.stream().filter(client -> destinationIps.contains(((InetSocketAddress) client.getNodeLocator()
                    .getPrimary(getEVCacheKey(key).getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()))
                    .getSocketAddress()).getAddress().getHostAddress())
            ).collect(Collectors.toList());
        }
        EVCacheClient[] evCacheClientsArray = new EVCacheClient[evCacheClients.size()];
        evCacheClients.toArray(evCacheClientsArray);
        if (replaceItem) {
            return this.set(key, value, null, timeToLive, policy, evCacheClientsArray, evCacheClientsArray.length);
        } else {
            // given that we do not want to replace items, we should explicitly set fixup to false, otherwise "add" can
            // result in "set" during fixup which can result in replacing items
            return this.add(key, value, null, timeToLive, policy, evCacheClientsArray, evCacheClientsArray.length, false);
        }
    }

    /**
     * Determines key-hashing state from fast properties: YES when the
     * server-group (falling back to app) hash.key property is true; MAYBE
     * when app-level (falling back to global) auto.hash.keys is true; else NO.
     */
    public KeyHashedState isKeyHashed(String appName, String serverGroup) {
        PropertyRepository propertyRepository = _poolManager.getEVCacheConfig().getPropertyRepository();
        boolean isKeyHashedAtAppOrAsg = propertyRepository.get(serverGroup + ".hash.key", Boolean.class).orElseGet(appName + ".hash.key").orElse(false).get();
        if (isKeyHashedAtAppOrAsg) {
            return KeyHashedState.YES;
        }
        if (propertyRepository.get(appName + ".auto.hash.keys", Boolean.class).orElseGet("evcache.auto.hash.keys").orElse(false).get()) {
            return KeyHashedState.MAYBE;
        }
        return KeyHashedState.NO;
    }
}
4,012
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheImplMBean.java
package com.netflix.evcache;

/**
 * JMX MBean interface mirroring {@link EVCache}. Adds no members of its own;
 * presumably it exists so an EVCache implementation can be registered with an
 * MBean server, exposing the cache operations as MBean attributes/operations
 * per the JMX naming convention (Impl + ImplMBean) — confirm against the
 * registration site.
 */
public interface EVCacheImplMBean extends EVCache {
}
4,013
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheSerializingTranscoder.java
/**
 * Copyright (C) 2006-2009 Dustin Sallings
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
 * IN THE SOFTWARE.
 */
package com.netflix.evcache;

import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.BaseSerializingTranscoder;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.transcoders.TranscoderUtils;
import net.spy.memcached.util.StringUtils;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

/**
 * Transcoder that serializes and compresses objects.
 *
 * <p>Scalar types (Boolean, Integer, Long, Date, Byte, Float, Double,
 * byte[]) are encoded natively and tagged with a SPECIAL_* flag; Strings are
 * encoded directly (JSON strings bypass compression entirely); everything
 * else is Java-serialized. Payloads above the compression threshold are
 * gzip-compressed when that actually shrinks them, and the achieved
 * compression ratio is published as a percentile timer.
 */
public class EVCacheSerializingTranscoder extends BaseSerializingTranscoder implements Transcoder<Object> {

    // General flags
    static final int SERIALIZED = 1;
    static final int COMPRESSED = 2;

    // Special flags for specially handled types.
    private static final int SPECIAL_MASK = 0xff00;
    static final int SPECIAL_BOOLEAN = (1 << 8);
    static final int SPECIAL_INT = (2 << 8);
    static final int SPECIAL_LONG = (3 << 8);
    static final int SPECIAL_DATE = (4 << 8);
    static final int SPECIAL_BYTE = (5 << 8);
    static final int SPECIAL_FLOAT = (6 << 8);
    static final int SPECIAL_DOUBLE = (7 << 8);
    static final int SPECIAL_BYTEARRAY = (8 << 8);

    static final String COMPRESSION = "COMPRESSION_METRIC";

    private final TranscoderUtils tu = new TranscoderUtils(true);
    // lazily initialized; not volatile — worst case two threads race and one
    // Timer lookup is repeated, which is harmless since the factory caches
    private Timer timer;

    /**
     * Get a serializing transcoder with the default max data size.
     */
    public EVCacheSerializingTranscoder() {
        this(CachedData.MAX_SIZE);
    }

    /**
     * Get a serializing transcoder that specifies the max data size.
     */
    public EVCacheSerializingTranscoder(int max) {
        super(max);
    }

    @Override
    public boolean asyncDecode(CachedData d) {
        // Compressed or serialized payloads are expensive enough to decode off
        // the I/O thread.
        if ((d.getFlags() & COMPRESSED) != 0 || (d.getFlags() & SERIALIZED) != 0) {
            return true;
        }
        return super.asyncDecode(d);
    }

    /*
     * (non-Javadoc)
     *
     * @see net.spy.memcached.Transcoder#decode(net.spy.memcached.CachedData)
     */
    public Object decode(CachedData d) {
        byte[] data = d.getData();
        Object rv = null;
        if ((d.getFlags() & COMPRESSED) != 0) {
            data = decompress(d.getData());
        }
        int flags = d.getFlags() & SPECIAL_MASK;
        if ((d.getFlags() & SERIALIZED) != 0 && data != null) {
            rv = deserialize(data);
        } else if (flags != 0 && data != null) {
            switch (flags) {
            case SPECIAL_BOOLEAN:
                rv = Boolean.valueOf(tu.decodeBoolean(data));
                break;
            case SPECIAL_INT:
                rv = Integer.valueOf(tu.decodeInt(data));
                break;
            case SPECIAL_LONG:
                rv = Long.valueOf(tu.decodeLong(data));
                break;
            case SPECIAL_DATE:
                rv = new Date(tu.decodeLong(data));
                break;
            case SPECIAL_BYTE:
                rv = Byte.valueOf(tu.decodeByte(data));
                break;
            case SPECIAL_FLOAT:
                // valueOf instead of the deprecated boxing constructor
                rv = Float.valueOf(Float.intBitsToFloat(tu.decodeInt(data)));
                break;
            case SPECIAL_DOUBLE:
                rv = Double.valueOf(Double.longBitsToDouble(tu.decodeLong(data)));
                break;
            case SPECIAL_BYTEARRAY:
                rv = data;
                break;
            default:
                getLogger().warn("Undecodeable with flags %x", flags);
            }
        } else {
            rv = decodeString(data);
        }
        return rv;
    }

    /*
     * (non-Javadoc)
     *
     * @see net.spy.memcached.Transcoder#encode(java.lang.Object)
     */
    public CachedData encode(Object o) {
        byte[] b = null;
        int flags = 0;
        if (o instanceof String) {
            b = encodeString((String) o);
            // JSON payloads are stored verbatim, skipping the compression path
            if (StringUtils.isJsonObject((String) o)) {
                return new CachedData(flags, b, getMaxSize());
            }
        } else if (o instanceof Long) {
            b = tu.encodeLong((Long) o);
            flags |= SPECIAL_LONG;
        } else if (o instanceof Integer) {
            b = tu.encodeInt((Integer) o);
            flags |= SPECIAL_INT;
        } else if (o instanceof Boolean) {
            b = tu.encodeBoolean((Boolean) o);
            flags |= SPECIAL_BOOLEAN;
        } else if (o instanceof Date) {
            b = tu.encodeLong(((Date) o).getTime());
            flags |= SPECIAL_DATE;
        } else if (o instanceof Byte) {
            b = tu.encodeByte((Byte) o);
            flags |= SPECIAL_BYTE;
        } else if (o instanceof Float) {
            b = tu.encodeInt(Float.floatToRawIntBits((Float) o));
            flags |= SPECIAL_FLOAT;
        } else if (o instanceof Double) {
            b = tu.encodeLong(Double.doubleToRawLongBits((Double) o));
            flags |= SPECIAL_DOUBLE;
        } else if (o instanceof byte[]) {
            b = (byte[]) o;
            flags |= SPECIAL_BYTEARRAY;
        } else {
            b = serialize(o);
            flags |= SERIALIZED;
        }
        assert b != null;
        if (b.length > compressionThreshold) {
            // Capture the uncompressed length BEFORE b may be replaced below;
            // the previous code divided by b.length after the swap, so the
            // reported ratio was always 100% whenever compression succeeded.
            final int uncompressedLength = b.length;
            byte[] compressed = compress(b);
            if (compressed.length < uncompressedLength) {
                getLogger().debug("Compressed %s from %d to %d", o.getClass().getName(), uncompressedLength, compressed.length);
                b = compressed;
                flags |= COMPRESSED;
            } else {
                getLogger().info("Compression increased the size of %s from %d to %d", o.getClass().getName(), uncompressedLength, compressed.length);
            }
            final long compressionRatio = Math.round((double) compressed.length / uncompressedLength * 100);
            updateTimerWithCompressionRatio(compressionRatio);
        }
        return new CachedData(flags, b, getMaxSize());
    }

    /**
     * Records the compression ratio (compressed size as a percentage of the
     * original size) on a lazily-created percentile timer.
     *
     * <p>NOTE(review): the ratio percentage is recorded as milliseconds on a
     * Timer — presumably deliberate reuse of the percentile-timer machinery
     * for a ratio distribution; confirm with the metrics owners.
     */
    private void updateTimerWithCompressionRatio(long ratioPercentage) {
        if (timer == null) {
            final List<Tag> tagList = new ArrayList<Tag>(1);
            tagList.add(new BasicTag(EVCacheMetricsFactory.COMPRESSION_TYPE, "gzip"));
            timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.COMPRESSION_RATIO, tagList, Duration.ofMillis(100));
        }
        timer.record(ratioPercentage, TimeUnit.MILLISECONDS);
    }
}
4,014
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCache.java
package com.netflix.evcache; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; import javax.annotation.Nullable; import javax.inject.Inject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.netflix.evcache.EVCacheLatch.Policy; import com.netflix.evcache.operation.EVCacheItem; import com.netflix.evcache.operation.EVCacheItemMetaData; import com.netflix.evcache.pool.EVCacheClientPoolManager; import net.spy.memcached.transcoders.Transcoder; import rx.Scheduler; import rx.Single; /** * An abstract interface for interacting with an Ephemeral Volatile Cache. * * <h3>Example</h3> * <p> * To create an instance of EVCache with AppName="EVCACHE", cachePrefix="Test" * and DefaultTTL="3600" * * <b>Dependency Injection (Guice) Approach</b> <blockquote> * * <pre> * {@literal @}Inject * public MyClass(EVCache.Builder builder,....) { * EVCache myCache = builder.setAppName("EVCACHE").setCachePrefix("Test").setDefaultTTL(3600).build(); * } * </pre> * * </blockquote> * * Below is an example to set value="John Doe" for key="name" <blockquote> * * <pre> * myCache.set("name", "John Doe"); * </pre> * * </blockquote> * * * To read the value for key="name" <blockquote> * * <pre> * String value = myCache.get("name"); * </pre> * * </blockquote> * * </p> * * @author smadappa */ public interface EVCache { // TODO: Remove Async methods (Project rx) and rename COMPLETABLE_* with ASYNC_* public static enum Call { GET, GETL, GET_AND_TOUCH, ASYNC_GET, BULK, SET, DELETE, INCR, DECR, TOUCH, APPEND, PREPEND, REPLACE, ADD, APPEND_OR_ADD, GET_ALL, META_GET, META_SET, META_DEBUG, COMPLETABLE_FUTURE_GET, COMPLETABLE_FUTURE_GET_BULK }; /** * Set an object in the EVCACHE (using the default Transcoder) regardless of * any existing value. 
* * The <code>timeToLive</code> value passed to memcached is as specified in * the defaultTTL value for this cache * * @param key * the key under which this object should be added. Ensure the * key is properly encoded and does not contain whitespace or * control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to store * @return Array of futures representing the processing of this operation * across all replicas * @throws EVCacheException * in the rare circumstance where queue is too full to accept * any more requests or issues with Serializing the value or any * IO Related issues */ <T> Future<Boolean>[] set(String key, T value) throws EVCacheException; /** * Set an object in the EVCACHE (using the default Transcoder) regardless of * any existing value. * * The <code>timeToLive</code> value is passed to memcached exactly as * given, and will be processed per the memcached protocol specification: * * <blockquote> The actual value sent may either be Unix time a.k.a EPOC * time (number of seconds since January 1, 1970, as a 32-bit int value), or * a number of seconds starting from current time. In the latter case, this * number of seconds may not exceed 60*60*24*30 (number of seconds in 30 * days); if the number sent by a client is larger than that, the server * will consider it to be real Unix time value rather than an offset from * current time. </blockquote> * * @param key * the key under which this object should be added. Ensure the * key is properly encoded and does not contain whitespace or * control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to store * @param timeToLive * the expiration of this object i.e. 
less than 30 days in * seconds or the exact expiry time as UNIX time * @return Array of futures representing the processing of this operation * across all the replicas * @throws EVCacheException * in the rare circumstance where queue is too full to accept * any more requests or issues Serializing the value or any IO * Related issues */ <T> Future<Boolean>[] set(String key, T value, int timeToLive) throws EVCacheException; /** * Set an object in the EVCACHE using the given Transcoder regardless of any * existing value. * * The <code>timeToLive</code> value is passed to memcached exactly as * given, and will be processed per the memcached protocol specification: * * <blockquote> The actual value sent may either be Unix time a.k.a EPOC * time (number of seconds since January 1, 1970, as a 32-bit int value), or * a number of seconds starting from current time. In the latter case, this * number of seconds may not exceed 60*60*24*30 (number of seconds in 30 * days); if the number sent by a client is larger than that, the server * will consider it to be real Unix time value rather than an offset from * current time. </blockquote> * * @param key * the key under which this object should be added. Ensure the * key is properly encoded and does not contain whitespace or * control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to store * @return Array of futures representing the processing of this operation * across all the replicas * @throws EVCacheException * in the rare circumstance where queue is too full to accept * any more requests or issues Serializing the value or any IO * Related issues */ <T> Future<Boolean>[] set(String key, T value, Transcoder<T> tc) throws EVCacheException; /** * Set an object in the EVCACHE using the given Transcoder regardless of any existing value using the default TTL and Transcoder. 
* * The <code>timeToLive</code> value is passed to memcached exactly as given, and will be processed per the memcached protocol specification: * * <blockquote> The actual value sent may either be Unix time aka EPOC time (number of seconds since January 1, 1970, as a 32-bit int value), or a number of seconds starting from current time. In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be real Unix time value rather than an offset from current time. </blockquote> * * @param key * the key under which this object should be added. * Ensure the key is properly encoded and does not * contain whitespace or control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to store * @param policy * The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached to 0 or the specified time has elapsed. * @return Array of futures representing the processing of this operation across all the replicas * @throws EVCacheException * in the rare circumstance where queue is too full to accept any more requests or issues Serializing the value or any IO Related issues */ <T> EVCacheLatch set(String key, T value, EVCacheLatch.Policy policy) throws EVCacheException; /** * Set an object in the EVCACHE using the given Transcoder regardless of any existing value with the given TTL. * * The <code>timeToLive</code> value is passed to memcached exactly as given, and will be processed per the memcached protocol specification: * * <blockquote> The actual value sent may either be Unix time a.k.a EPOC time (number of seconds since January 1, 1970, as a 32-bit int value), or a number of seconds starting from current time. 
In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be real Unix time value rather than an offset from current time. </blockquote> * * @param key * the key under which this object should be added. Ensure the key is properly encoded and does not contain whitespace or control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to store * @param timeToLive * the expiration of this object i.e. less than 30 days in seconds or the exact expiry time as UNIX time * @param policy * The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached to 0 or the specified time has elapsed. * @return Array of futures representing the processing of this operation across all the replicas * @throws EVCacheException * in the rare circumstance where queue is too full to accept any more requests or issues Serializing the value or any IO Related issues */ <T> EVCacheLatch set(String key, T value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException; /** * Set an object in the EVCACHE using the given Transcoder regardless of any existing value using the given Transcoder. * * The <code>timeToLive</code> value is passed to memcached exactly as given, and will be processed per the memcached protocol specification: * * <blockquote> The actual value sent may either be Unix time aka EPOC time (number of seconds since January 1, 1970, as a 32-bit int value), or a number of seconds starting from current time. In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be real Unix time value rather than an offset from current time. </blockquote> * * @param key * the key under which this object should be added. 
Ensure the key is properly encoded and does not contain whitespace or control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to store * @param tc * the Transcoder to serialize the data * @param policy * The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached to 0 or the specified time has elapsed. * @return Array of futures representing the processing of this operation across all the replicas * @throws EVCacheException * in the rare circumstance where queue is too full to accept any more requests or issues Serializing the value or any IO Related issues */ <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException; /** * Set an object in the EVCACHE using the given Transcoder regardless of any * existing value. * * The <code>timeToLive</code> value is passed to memcached exactly as * given, and will be processed per the memcached protocol specification: * * <blockquote> The actual value sent may either be Unix time aka EPOC time * (number of seconds since January 1, 1970, as a 32-bit int value), or a * number of seconds starting from current time. In the latter case, this * number of seconds may not exceed 60*60*24*30 (number of seconds in 30 * days); if the number sent by a client is larger than that, the server * will consider it to be real Unix time value rather than an offset from * current time. </blockquote> * * @param key * the key under which this object should be added. Ensure the * key is properly encoded and does not contain whitespace or * control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to store * @param tc * the Transcoder to serialize the data * @param timeToLive * the expiration of this object i.e. 
less than 30 days in * seconds or the exact expiry time as UNIX time * @param policy * The Latch will be returned based on the Policy. The Latch can * then be used to await until the count down has reached to 0 or * the specified time has elapsed. * @return Array of futures representing the processing of this operation * across all the replicas * @throws EVCacheException * in the rare circumstance where queue is too full to accept * any more requests or issues Serializing the value or any IO * Related issues */ <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException; /** * Replace an existing object in the EVCACHE using the default Transcoder & * default TTL. If the object does not exist in EVCACHE then the value is * not replaced. * * @param key * the key under which this object should be replaced. Ensure the * key is properly encoded and does not contain whitespace or * control characters. The max length of the key (including prefix) * is 250 characters. * @param T * the object to replace * @param policy * The Latch will be returned based on the Policy. The Latch can * then be used to await until the count down has reached to 0 or * the specified time has elapsed. * * @return EVCacheLatch which will encompasses the Operation. You can block * on the Operation based on the policy to ensure the required * criteria is met. The Latch can also be queried to get details on * status of the operations * * @throws EVCacheException * in the rare circumstance where queue is too full to accept * any more requests or issues Serializing the value or any IO * Related issues */ <T> EVCacheLatch replace(String key, T value, EVCacheLatch.Policy policy) throws EVCacheException; /** * Replace an existing object in the EVCACHE using the given Transcoder & * default TTL. If the object does not exist in EVCACHE then the value is * not replaced. * * @param key * the key under which this object should be replaced. 
     * Ensure the key is properly encoded and does not contain whitespace or
     * control characters. The max length of the key (including prefix) is 250
     * characters.
     * @param value
     *            the object with which to replace the existing value
     * @param tc
     *            the Transcoder to serialize the data
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block on
     *         the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details
     *         on the status of the operations.
     *
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues serializing the
     *             value, or on any IO related issues
     */
    <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException;

    /**
     * Replace an existing object in EVCACHE using the given Transcoder. If
     * the object does not exist in EVCACHE then the value is not replaced.
     *
     * The <code>timeToLive</code> value is passed to memcached exactly as
     * given, and will be processed per the memcached protocol specification:
     *
     * <blockquote> The actual value sent may either be Unix time aka EPOC time
     * (number of seconds since January 1, 1970, as a 32-bit int value), or a
     * number of seconds starting from current time. In the latter case, this
     * number of seconds may not exceed 60*60*24*30 (number of seconds in 30
     * days); if the number sent by a client is larger than that, the server
     * will consider it to be real Unix time value rather than an offset from
     * current time. </blockquote>
     *
     * @param key
     *            the key under which this object should be replaced. Ensure
     *            the key is properly encoded and does not contain whitespace
     *            or control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object with which to replace the existing value
     * @param tc
     *            the Transcoder to serialize the data
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block on
     *         the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details
     *         on the status of the operations.
     *
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues serializing the
     *             value, or on any IO related issues
     */
    <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException;

    /**
     * Set an object in EVCACHE using the given {@link Transcoder} regardless
     * of any existing value.
     *
     * The <code>timeToLive</code> value is passed to memcached exactly as
     * given (offset in seconds up to 30 days, otherwise interpreted as UNIX
     * time — see the memcached protocol specification).
     *
     * @param key
     *            the key under which this object should be added. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object to store
     * @param tc
     *            the Transcoder to serialize the data
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues serializing the
     *             value, or on any IO related issues
     */
    <T> Future<Boolean>[] set(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;

    /**
     * Remove a current key value relation from the Cache.
     *
     * @param key
     *            the non-null key corresponding to the relation to be removed.
     *            Ensure the key is properly encoded and does not contain
     *            whitespace or control characters. The max length of the key
     *            (including prefix) is 250 characters.
     * @return Array of futures representing the processing of this operation
     *         across all the replicas. If a future returns true then the key
     *         was deleted from Cache; if false then the key was not found and
     *         thus not deleted (in effect the outcome was what was desired).
     *         If null is returned then the operation timed out and the key
     *         was probably not deleted; in that scenario retry the operation.
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on any IO related issues
     */
    Future<Boolean>[] delete(String key) throws EVCacheException;

    /**
     * Remove a current key value relation from the Cache.
     *
     * @param key
     *            the non-null key corresponding to the relation to be removed.
     *            Ensure the key is properly encoded and does not contain
     *            whitespace or control characters. The max length of the key
     *            (including prefix) is 250 characters.
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block on
     *         the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details
     *         on the status of the operations.
     *
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on any IO related issues
     */
    <T> EVCacheLatch delete(String key, EVCacheLatch.Policy policy) throws EVCacheException;

    /**
     * Retrieve the value for the given key.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @return the Value for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     *
     *             Note: If the data is replicated by zone, the value is read
     *             from the zone local to the client; if it cannot be found
     *             there, null is returned. This is transparent to the users.
     */
    <T> T get(String key) throws EVCacheException;

    /**
     * Async retrieve the value for the given key.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @return a CompletableFuture of the value for the given key from the
     *         cache (null if there is none).
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> CompletableFuture<T> getAsync(String key) throws EVCacheException;

    /**
     * Retrieve the value for the given key.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return the Value for the given key from the cache (null if there is
     *         none).
     */
    <T> Single<T> get(String key, Scheduler scheduler);

    /**
     * Retrieve the value for the given key using the specified Transcoder for
     * deserialization.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @return the Value for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     *
     *             Note: If the data is replicated by zone, the value is read
     *             from the zone local to the client; if it cannot be found
     *             there, null is returned. This is transparent to the users.
     */
    <T> T get(String key, Transcoder<T> tc) throws EVCacheException;

    /**
     * Async retrieve the value for the given key using the specified
     * Transcoder for deserialization.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @return a CompletableFuture of the value for the given key from the
     *         cache (null if there is none).
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> CompletableFuture<T> getAsync(String key, Transcoder<T> tc) throws EVCacheException;

    /**
     * Retrieve the meta data for the given key.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @return the metadata for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on any IO related issues
     *
     *             Note: If the data is replicated by zone, the metadata is
     *             read from the zone local to the client; if not found there,
     *             other zones are tried; if all are unsuccessful, null is
     *             returned.
     */
    default EVCacheItemMetaData metaDebug(String key) throws EVCacheException {
        throw new EVCacheException("Default implementation. If you are implementing EVCache interface you need to implement this method.");
    }

    /**
     * Retrieve the value and its metadata for the given key using the
     * specified Transcoder for deserialization.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @return the Value for the given key from the cache (null if there is
     *         none) and its metadata, all encapsulated in an EVCacheItem.
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     *
     *             Note: If the data is replicated by zone, the value is read
     *             from the zone local to the client; misses are retried in
     *             other zones; if still not found, null is returned.
     */
    default <T> EVCacheItem<T> metaGet(String key, Transcoder<T> tc) throws EVCacheException {
        throw new EVCacheException("Default implementation. If you are implementing EVCache interface you need to implement this method.");
    }

    /**
     * Retrieve the value for the given key using the specified Transcoder for
     * deserialization.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return the Value for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> T get(String key, Transcoder<T> tc, Policy policy) throws EVCacheException;

    /**
     * Retrieve the value for the given key using the specified Transcoder for
     * deserialization.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 200 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return the Value for the given key from the cache (null if there is
     *         none).
     */
    <T> Single<T> get(String key, Transcoder<T> tc, Scheduler scheduler);

    /**
     * Retrieve the value for the given key using the default Transcoder for
     * deserialization and reset its expiration using the passed timeToLive.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 200 characters.
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return the Value for the given key from the cache (null if there is
     *         none).
     */
    <T> Single<T> getAndTouch(String key, int timeToLive, Scheduler scheduler);

    /**
     * Retrieve the value for the given key using the given Transcoder for
     * deserialization and reset its expiration using the passed timeToLive.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 200 characters.
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param tc
     *            the Transcoder to deserialize the data
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return the Value for the given key from the cache (null if there is
     *         none).
     */
    <T> Single<T> getAndTouch(String key, int timeToLive, Transcoder<T> tc, Scheduler scheduler);

    /**
     * Get with a single key and reset its expiration.
     *
     * @param key
     *            the key to get. Ensure the key is properly encoded and does
     *            not contain whitespace or control characters. The max length
     *            of the key (including prefix) is 200 characters.
     * @param timeToLive
     *            the new expiration of this object i.e.
     *            less than 30 days in seconds or the exact expiry time as
     *            UNIX time
     * @return the result from the cache (null if there is none)
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> T getAndTouch(String key, int timeToLive) throws EVCacheException;

    /**
     * Get with a single key and reset its expiration.
     *
     * @param key
     *            the key to get. Ensure the key is properly encoded and does
     *            not contain whitespace or control characters. The max length
     *            of the key (including prefix) is 200 characters.
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param tc
     *            the Transcoder to deserialize the data
     * @return the result from the cache (null if there is none)
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> T getAndTouch(String key, int timeToLive, Transcoder<T> tc) throws EVCacheException;

    /**
     * Retrieve the value of a set of keys.
     *
     * @param keys
     *            the keys for which we need the values. Ensure each key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of each key (including prefix) is
     *            200 characters.
     * @return a map of the values (for each value that exists). If the
     *         returned map contains the key but the value is null then the
     *         key does not exist in the cache. If a key is missing then we
     *         were not able to retrieve the data for that key due to some
     *         exception.
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> Map<String, T> getBulk(String... keys) throws EVCacheException;

    /**
     * Async retrieve the value of a set of keys.
     *
     * @param keys
     *            the keys for which we need the values. Ensure each key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of each key (including prefix) is
     *            200 characters.
     * @return a map of the values (for each value that exists). If the
     *         returned map contains the key but the value is null then the
     *         key does not exist in the cache. If a key is missing then we
     *         were not able to retrieve the data for that key due to some
     *         exception.
     */
    <T> CompletableFuture<Map<String, T>> getAsyncBulk(String... keys);

    /**
     * Retrieve the value for a set of keys, using a specified Transcoder for
     * deserialization.
     *
     * @param keys
     *            keys for which we need the values. Ensure each key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of each key (including prefix) is
     *            200 characters.
     * @param tc
     *            the transcoder to use for deserialization
     * @return a map of the values (for each value that exists). If the
     *         returned map contains the key but the value is null then the
     *         key does not exist in the cache. If a key is missing then we
     *         were not able to retrieve the data for that key due to some
     *         exception.
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> Map<String, T> getBulk(Transcoder<T> tc, String... keys) throws EVCacheException;

    /**
     * Async retrieve the value for a set of keys, using a specified Transcoder
     * for deserialization. In Beta testing (to be used by gateway team).
     *
     * @param keys
     *            keys for which we need the values. Ensure each key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of each key (including prefix) is
     *            200 characters.
     * @param tc
     *            the transcoder to use for deserialization
     * @return a map of the values (for each value that exists). If the
     *         returned map contains the key but the value is null then the
     *         key does not exist in the cache. If a key is missing then we
     *         were not able to retrieve the data for that key due to some
     *         exception.
     */
    <T> CompletableFuture<Map<String, T>> getAsyncBulk(Collection<String> keys, Transcoder<T> tc);

    /**
     * Retrieve the value for the collection of keys, using the default
     * Transcoder for deserialization.
     *
     * @param keys
     *            The collection of keys for which we need the values. Ensure
     *            each key is properly encoded and does not contain whitespace
     *            or control characters. The max length of each key (including
     *            prefix) is 200 characters.
     * @return a map of the values (for each value that exists). If the
     *         returned map contains the key but the value is null then the
     *         key does not exist in the cache. If a key is missing then we
     *         were not able to retrieve the data for that key due to some
     *         exception.
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> Map<String, T> getBulk(Collection<String> keys) throws EVCacheException;

    /**
     * Retrieve the value for the collection of keys, using the specified
     * Transcoder for deserialization.
     *
     * @param keys
     *            The collection of keys for which we need the values. Ensure
     *            each key is properly encoded and does not contain whitespace
     *            or control characters. The max length of each key (including
     *            prefix) is 200 characters.
     * @param tc
     *            the transcoder to use for deserialization
     * @return a map of the values (for each value that exists). If the
     *         returned map contains the key but the value is null then the
     *         key does not exist in the cache. If a key is missing then we
     *         were not able to retrieve the data for that key due to some
     *         exception.
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> Map<String, T> getBulk(Collection<String> keys, Transcoder<T> tc) throws EVCacheException;

    /**
     * Retrieve the value for the collection of keys, using the specified
     * Transcoder for deserialization, and reset the expiration of each found
     * key to the passed timeToLive.
     *
     * @param keys
     *            The collection of keys for which we need the values. Ensure
     *            each key is properly encoded and does not contain whitespace
     *            or control characters. The max length of each key (including
     *            prefix) is 200 characters.
     * @param tc
     *            the transcoder to use for deserialization
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @return a map of the values (for each value that exists). If the value
     *         of a given key does not exist then null is returned. Only the
     *         keys whose values are not null and exist in the returned map
     *         are set to the new TTL as specified in timeToLive.
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues during
     *             deserialization, or on any IO related issues
     */
    <T> Map<String, T> getBulkAndTouch(Collection<String> keys, Transcoder<T> tc, int timeToLive) throws EVCacheException;

    /**
     * Get the value for the given key asynchronously and deserialize it with
     * the default transcoder.
     *
     * @param key
     *            the key for which we need the value. Ensure the key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of the key (including prefix) is
     *            200 characters.
     * @return the Futures containing the Value or null.
     * @throws EVCacheException
     *             in the circumstance where the queue is too full to accept
     *             any more requests, or on issues during deserialization, or
     *             on a timeout retrieving the value, or on any IO related
     *             issues
     *
     * @deprecated This is a sub-optimal operation that does not support
     *             Retries, Fast Failures, FIT, GC Detection, etc.
     *             Will be removed in a subsequent release.
     */
    <T> Future<T> getAsynchronous(String key) throws EVCacheException;

    /**
     * Get the value for the given key asynchronously and deserialize it with
     * the given transcoder.
     *
     * @param key
     *            the key for which we need the value. Ensure the key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of the key (including prefix) is
     *            200 characters.
     * @param tc
     *            the transcoder to use for deserialization
     * @return the Futures containing the Value or null.
     * @throws EVCacheException
     *             in the circumstance where the queue is too full to accept
     *             any more requests, or on issues during deserialization, or
     *             on a timeout retrieving the value, or on any IO related
     *             issues
     *
     * @deprecated This is a sub-optimal operation that does not support
     *             Retries, Fast Failures, FIT, GC Detection, etc.
     *             Will be removed in a subsequent release.
     */
    <T> Future<T> getAsynchronous(String key, Transcoder<T> tc) throws EVCacheException;

    /**
     * Increment the given counter, returning the new value.
     *
     * @param key
     *            the key. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 200 characters.
     * @param by
     *            the amount to increment
     * @param def
     *            the default value (if the counter does not exist)
     * @param exp
     *            the expiration of this object
     * @return the new value, or -1 if we were unable to increment or add
     * @throws EVCacheException
     *             in the circumstance where a timeout is exceeded or the
     *             queue is full
     */
    public long incr(String key, long by, long def, int exp) throws EVCacheException;

    /**
     * Decrement the given counter, returning the new value.
     *
     * @param key
     *            the key. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 200 characters.
     * @param by
     *            the amount to decrement
     * @param def
     *            the default value (if the counter does not exist)
     * @param exp
     *            the expiration of this object
     * @return the new value, or -1 if we were unable to decrement or add
     * @throws EVCacheException
     *             in the circumstance where a timeout is exceeded or the
     *             queue is full
     */
    public long decr(String key, long by, long def, int exp) throws EVCacheException;

    /**
     * Append the given value to the existing value in EVCache. You cannot
     * append if the key does not exist in EVCache. If the value has not
     * changed then false will be returned.
     *
     * @param key
     *            the key under which this object should be appended. Ensure
     *            the key is properly encoded and does not contain whitespace
     *            or control characters. The max length of the key (including
     *            prefix) is 200 characters.
     * @param value
     *            the value to be appended
     * @param tc
     *            the transcoder that will be used for serialization
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     *
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the circumstance where the queue is too full to accept
     *             any more requests, or on issues serializing the value, or
     *             on any IO related issues
     */
    <T> Future<Boolean>[] append(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;

    /**
     * Append the given value to the existing value in EVCache. You cannot
     * append if the key does not exist in EVCache. If the value has not
     * changed or does not exist then false will be returned.
     *
     * @param key
     *            the key under which this object should be appended. Ensure
     *            the key is properly encoded and does not contain whitespace
     *            or control characters. The max length of the key (including
     *            prefix) is 200 characters.
     * @param value
     *            the value to be appended
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     *
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the circumstance where the queue is too full to accept
     *             any more requests, or on issues serializing the value, or
     *             on any IO related issues
     */
    <T> Future<Boolean>[] append(String key, T value, int timeToLive) throws EVCacheException;

    /**
     * @deprecated Please use {@link #add(String, Object, Transcoder, int, com.netflix.evcache.EVCacheLatch.Policy)}
     *
     *             Add the given value to EVCache. You cannot add if the key
     *             already exists in EVCache.
     *
     * @param key
     *            the key which this object should be added to. Ensure the key
     *            is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 200 characters.
     * @param value
     *            the value to be added
     * @param tc
     *            the transcoder that will be used for serialization
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     *
     * @return boolean which indicates if the add was successful or not. The
     *         operation will fail with a false response if the data already
     *         exists in EVCache.
     *
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues serializing the
     *             value, or on any IO related issues
     */
    <T> boolean add(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;

    /**
     * Add the given value to EVCache. You cannot add if the key already
     * exists in EVCache.
     *
     * @param key
     *            the key which this object should be added to. Ensure the key
     *            is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 200 characters.
     * @param value
     *            the value to be added
     * @param tc
     *            the transcoder that will be used for serialization
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block on
     *         the Operation to ensure all adds are successful. If there is
     *         any partial success the client will try and fix the data.
     *
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on issues serializing the
     *             value, or on any IO related issues
     */
    <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException;

    /**
     * Touch the given key and reset its expiration time.
     *
     * @param key
     *            the key to touch. Ensure the key is properly encoded and
     *            does not contain whitespace or control characters. The max
     *            length of the key (including prefix) is 200 characters.
     * @param ttl
     *            the new expiration time in seconds
     *
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on any IO related issues
     */
    <T> Future<Boolean>[] touch(String key, int ttl) throws EVCacheException;

    /**
     * Touch the given key and reset its expiration time.
     *
     * @param key
     *            the key to touch. Ensure the key is properly encoded and
     *            does not contain whitespace or control characters. The max
     *            length of the key (including prefix) is 200 characters.
     * @param ttl
     *            the new expiration time in seconds
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block on
     *         the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details
     *         on the status of the operations.
     *
     * @throws EVCacheException
     *             in the rare circumstance where the queue is too full to
     *             accept any more requests, or on any IO related issues
     */
    <T> EVCacheLatch touch(String key, int ttl, EVCacheLatch.Policy policy) throws EVCacheException;

    /**
     * Append the given value to the existing value in EVCache. If the key
     * does not exist then the key will be added.
     *
     * @param key
     *            the key under which this object should be appended or added.
     *            Ensure the key is properly encoded and does not contain
     *            whitespace or control characters. The max length of the key
     *            (including prefix) is 200 characters.
     * @param value
     *            the value to be appended
     * @param tc
     *            the transcoder that will be used for serialization
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     *
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the circumstance where the queue is too full to accept
     *             any more requests, or on issues serializing the value, or
     *             on any IO related issues
     */
    <T> Future<Boolean>[] appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;

    /**
     * Append the given value to the existing value in EVCache. If the key
     * does not exist then the key will be added.
     *
     * @param key
     *            the key under which this object should be appended or added.
     *            Ensure the key is properly encoded and does not contain
     *            whitespace or control characters. The max length of the key
     *            (including prefix) is 200 characters.
* @param T * the value to be appended * @param tc * the transcoder the will be used for serialization * @param timeToLive * the expiration of this object i.e. less than 30 days in * seconds or the exact expiry time as UNIX time * * @param policy * The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached to 0 or the specified time has elapsed. * * @return EVCacheLatch which will encompasses the Operation. You can block * on the Operation based on the policy to ensure the required * criteria is met. The Latch can also be queried to get details on * status of the operations * * @throws EVCacheException * in the circumstance where queue is too full to accept any * more requests or issues Serializing the value or any IO * Related issues */ <T> EVCacheLatch appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException; /** * The {@code appName} that will be used by this {@code EVCache}. * * @param The * name of the EVCache App cluster. * @return this {@code Builder} object */ String getAppName(); /** * The {@code cachePrefix} that will be used by this {@code EVCache}. * * @param The * name of the EVCache App cluster. * @return this {@code Builder} object */ String getCachePrefix(); /** * A Builder that builds an EVCache based on the specified App Name, cache * Name, TTl and Transcoder. * * @author smadappa */ public class Builder { private static final Logger logger = LoggerFactory.getLogger(EVCacheImpl.class); private String _appName; private String _cachePrefix = null; private int _ttl = 900; private Transcoder<?> _transcoder = null; private boolean _serverGroupRetry = true; private boolean _enableExceptionThrowing = false; private List<Customizer> _customizers = new ArrayList<>(); @Inject private EVCacheClientPoolManager _poolManager; /** * Customizers allow post-processing of the Builder. This affords a way for libraries to * perform customization. 
*/ @FunctionalInterface public interface Customizer { void customize(final String cacheName, final Builder builder); } public static class Factory { public Builder createInstance(String appName) { return Builder.forApp(appName); } } public static Builder forApp(final String appName) { return new Builder().setAppName(appName); } public Builder() { } public Builder withConfigurationProperties( final EVCacheClientPoolConfigurationProperties configurationProperties) { return this .setCachePrefix(configurationProperties.getKeyPrefix()) .setDefaultTTL(configurationProperties.getTimeToLive()) .setRetry(configurationProperties.getRetryEnabled()) .setExceptionThrowing(configurationProperties.getExceptionThrowingEnabled()); } /** * The {@code appName} that will be used by this {@code EVCache}. * * @param The * name of the EVCache App cluster. * @return this {@code Builder} object */ public Builder setAppName(String appName) { if (appName == null) throw new IllegalArgumentException("param appName cannot be null."); this._appName = appName.toUpperCase(Locale.US); if (!_appName.startsWith("EVCACHE")) logger.warn("Make sure the app you are connecting to is EVCache App"); return this; } /** * Adds {@code cachePrefix} to the key. This ensures there are no cache * collisions if the same EVCache app is used across multiple use cases. * If the cache is not shared we recommend to set this to * <code>null</code>. Default is <code>null</code>. * * @param cacheName. * The cache prefix cannot contain colon (':') in it. * @return this {@code Builder} object */ public Builder setCachePrefix(String cachePrefix) { if (_cachePrefix != null && _cachePrefix.indexOf(':') != -1) throw new IllegalArgumentException( "param cacheName cannot contain ':' character."); this._cachePrefix = cachePrefix; return this; } /** * @deprecated Please use {@link #setCachePrefix(String)} * @see #setCachePrefix(String) * * Adds {@code cacheName} to the key. 
This ensures there are no * cache collisions if the same EVCache app is used for across * multiple use cases. * * @param cacheName * @return this {@code Builder} object */ public Builder setCacheName(String cacheName) { return setCachePrefix(cacheName); } /** * The default Time To Live (TTL) for items in {@link EVCache} in * seconds. You can override the value by passing the desired TTL with * {@link EVCache#set(String, Object, int)} operations. * * @param ttl. Default is 900 seconds. * @return this {@code Builder} object */ public Builder setDefaultTTL(int ttl) { if (ttl < 0) throw new IllegalArgumentException("Time to Live cannot be less than 0."); this._ttl = ttl; return this; } /** * The default Time To Live (TTL) for items in {@link EVCache} in * seconds. You can override the value by passing the desired TTL with * {@link EVCache#set(String, Object, int)} operations. * * @param ttl. Default is 900 seconds. * @return this {@code Builder} object */ public Builder setDefaultTTL(@Nullable final Duration ttl) { if (ttl == null) { return this; } return setDefaultTTL((int) ttl.getSeconds()); } @VisibleForTesting Transcoder<?> getTranscoder() { return this._transcoder; } /** * The default {@link Transcoder} to be used for serializing and * de-serializing items in {@link EVCache}. * * @param transcoder * @return this {@code Builder} object */ public <T> Builder setTranscoder(Transcoder<T> transcoder) { this._transcoder = transcoder; return this; } /** * @deprecated Please use {@link #enableRetry()} * * Will enable retries across Zone (Server Group). * * @return this {@code Builder} object */ public <T> Builder enableZoneFallback() { this._serverGroupRetry = true; return this; } /** * Will enable or disable retry across Server Group for cache misses and exceptions * if there are multiple Server Groups for the given EVCache App and * data is replicated across them. This ensures the Hit Rate continues * to be unaffected whenever a server group loses instances. 
* * By Default retry is enabled. * * @param enableRetry whether retries are to be enabled * @return this {@code Builder} object */ public Builder setRetry(boolean enableRetry) { this._serverGroupRetry = enableRetry; return this; } /** * Will enable retry across Server Group for cache misses and exceptions * if there are multiple Server Groups for the given EVCache App and * data is replicated across them. This ensures the Hit Rate continues * to be unaffected whenever a server group loses instances. * * By Default retry is enabled. * * @return this {@code Builder} object */ public <T> Builder enableRetry() { this._serverGroupRetry = true; return this; } /** * Will disable retry across Server Groups. This means if the data is * not found in one server group null is returned. * * @return this {@code Builder} object */ public <T> Builder disableRetry() { this._serverGroupRetry = false; return this; } /** * @deprecated Please use {@link #disableRetry()} * * Will disable retry across Zone (Server Group). * * @return this {@code Builder} object */ public <T> Builder disableZoneFallback() { this._serverGroupRetry = false; return this; } /** * By Default exceptions are not propagated and null values are * returned. By enabling exception propagation we return the * {@link EVCacheException} whenever the operations experience them. * * @param enableExceptionThrowing whether exception throwing is to be enabled * @return this {@code Builder} object */ public Builder setExceptionThrowing(boolean enableExceptionThrowing) { this._enableExceptionThrowing = enableExceptionThrowing; return this; } /** * By Default exceptions are not propagated and null values are * returned. By enabling exception propagation we return the * {@link EVCacheException} whenever the operations experience them. 
* * @return this {@code Builder} object */ public <T> Builder enableExceptionPropagation() { this._enableExceptionThrowing = true; return this; } /** * Adds customizers to be applied by {@code customize}. * * @param customizers List of {@code Customizer}s * @return this {@code Builder} object */ public Builder addCustomizers(@Nullable final List<Customizer> customizers) { this._customizers.addAll(customizers); return this; } /** * Applies {@code Customizer}s added through {@code addCustomizers} to {@this}. * * @return this {@code Builder} object */ public Builder customize() { _customizers.forEach(customizer -> { customizeWith(customizer); }); return this; } /** * Customizes {@this} with the {@code customizer}. * * @param customizer {@code Customizer} or {@code Consumer<String, Builder>} to be applied to {@code this}. * @return this {@code Builder} object */ public Builder customizeWith(final Customizer customizer) { customizer.customize(this._appName, this); return this; } protected EVCache newImpl(String appName, String cachePrefix, int ttl, Transcoder<?> transcoder, boolean serverGroupRetry, boolean enableExceptionThrowing, EVCacheClientPoolManager poolManager) { return new EVCacheImpl(appName, cachePrefix, ttl, transcoder, serverGroupRetry, enableExceptionThrowing, poolManager); } /** * Returns a newly created {@code EVCache} based on the contents of the * {@code Builder}. 
*/ @SuppressWarnings("deprecation") public EVCache build() { if (_poolManager == null) { _poolManager = EVCacheClientPoolManager.getInstance(); if (logger.isDebugEnabled()) logger.debug("_poolManager - " + _poolManager + " through getInstance"); } if (_appName == null) { throw new IllegalArgumentException("param appName cannot be null."); } if(_cachePrefix != null) { for(int i = 0; i < _cachePrefix.length(); i++) { if(Character.isWhitespace(_cachePrefix.charAt(i))){ throw new IllegalArgumentException("Cache Prefix ``" + _cachePrefix + "`` contains invalid character at position " + i ); } } } customize(); return newImpl(_appName, _cachePrefix, _ttl, _transcoder, _serverGroupRetry, _enableExceptionThrowing, _poolManager); } } }
4,015
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheReadQueueException.java
package com.netflix.evcache; public class EVCacheReadQueueException extends EVCacheException { private static final long serialVersionUID = -7660503904923117538L; public EVCacheReadQueueException(String message) { super(message); } public EVCacheReadQueueException(String message, Throwable cause) { super(message, cause); } }
4,016
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/dto/EVCacheResponseStatus.java
package com.netflix.evcache.dto; public class EVCacheResponseStatus { private String status; public EVCacheResponseStatus(String status) { this.status = status; } public String getStatus() { return status; } public void setStatus(String status) { this.status = status; } }
4,017
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/dto/KeyMapDto.java
package com.netflix.evcache.dto; import com.netflix.evcache.EVCacheKey; import java.util.Map; public class KeyMapDto { Map<String, EVCacheKey> keyMap; boolean isKeyHashed; public KeyMapDto(Map<String, EVCacheKey> keyMap, boolean isKeyHashed) { this.keyMap = keyMap; this.isKeyHashed = isKeyHashed; } public Map<String, EVCacheKey> getKeyMap() { return keyMap; } public boolean isKeyHashed() { return isKeyHashed; } }
4,018
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheItem.java
package com.netflix.evcache.operation; public class EVCacheItem<T> { private final EVCacheItemMetaData item; private T data = null; private int flag = 0; public EVCacheItem() { item = new EVCacheItemMetaData(); } public EVCacheItemMetaData getItemMetaData() { return item; } public T getData() { return data; } public void setData(T data) { this.data = data; } public int getFlag() { return flag; } public void setFlag(int flag) { this.flag = flag; } @Override public String toString() { return "EVCacheItem [item=" + item + ", data=" + data + ", flag=" + flag + "]"; } }
4,019
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheOperationFuture.java
package com.netflix.evcache.operation;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;

import net.spy.memcached.ops.OperationState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.EVCacheGetOperationListener;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.sun.management.GcInfo;

import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.internal.CheckedOperationTimeoutException;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.ops.Operation;

import rx.Scheduler;
import rx.Single;
import rx.functions.Action0;

/**
 * Managed future for operations.
 *
 * <p>
 * From an OperationFuture, application code can determine if the status of a
 * given Operation in an asynchronous manner.
 *
 * <p>
 * If for example we needed to update the keys "user:&lt;userid&gt;:name",
 * "user:&lt;userid&gt;:friendlist" because later in the method we were going to
 * verify the change occurred as expected interacting with the user, we can fire
 * multiple IO operations simultaneously with this concept.
 *
 * <p>
 * EVCache's specialization adds GC-pause-aware timeout handling: a short
 * timeout that elapses because the JVM paused is retried once rather than
 * reported as a memcached timeout.
 *
 * @param <T>
 *            Type of object returned from this future.
 */
@SuppressWarnings("restriction")
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings("EXS_EXCEPTION_SOFTENING_HAS_CHECKED")
public class EVCacheOperationFuture<T> extends OperationFuture<T> {

    private static final Logger log = LoggerFactory.getLogger(EVCacheOperationFuture.class);

    /**
     * Holder idiom: the shared scheduler for timeout bookkeeping is only
     * created when first touched (class-load of the holder), and its single
     * daemon thread never blocks JVM shutdown.
     */
    private static final class LazySharedExecutor {
        private static final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(
                1,
                new ThreadFactoryBuilder()
                        .setDaemon(true)
                        .setNameFormat("evcache-timeout-%s")
                        .setUncaughtExceptionHandler(
                                (t, e) -> log.error(
                                        "{} timeout operation failed with exception: {}",
                                        t.getName(),
                                        e))
                        .build());

        static {
            // We don't need to keep around all those cancellation tasks taking up memory once the
            // initial caller completes.
            executor.setRemoveOnCancelPolicy(true);
        }
    }

    // Counted down when the operation completes; awaited in get().
    private final CountDownLatch latch;
    // Holds the operation's result value once available.
    private final AtomicReference<T> objRef;
    // The underlying memcached operation; set after construction via setOperation().
    private Operation op;
    // The (possibly hashed) key this future is fetching.
    private final String key;
    // Wall-clock creation time; used to decide whether a GC pause overlapped the wait.
    private final long start;
    // Client that issued the operation; source of app/zone/server-group metadata and metric tags.
    private final EVCacheClient client;

    /**
     * Creates the future.
     *
     * @param k         the key being operated on
     * @param l         latch counted down on completion
     * @param oref      holder for the result value
     * @param opTimeout operation timeout passed to the superclass
     * @param service   executor used by the superclass for listener callbacks
     * @param client    the issuing EVCache client
     */
    public EVCacheOperationFuture(String k, CountDownLatch l, AtomicReference<T> oref, long opTimeout, ExecutorService service, EVCacheClient client) {
        super(k, l, oref, opTimeout, service);
        this.latch = l;
        this.objRef = oref;
        this.key = k;
        this.client = client;
        this.start = System.currentTimeMillis();
    }

    /** @return the underlying memcached operation (null until set) */
    public Operation getOperation() {
        return this.op;
    }

    // Keeps a local reference in addition to the superclass copy so this class
    // can consult op state without superclass accessors.
    public void setOperation(Operation to) {
        this.op = to;
        super.setOperation(to);
    }

    /** @return the EVCache app name of the issuing client */
    public String getApp() {
        return client.getAppName();
    }

    /** @return the key this future is operating on */
    public String getKey() {
        return key;
    }

    /** @return the availability zone of the issuing client */
    public String getZone() {
        return client.getZone();
    }

    /** @return the server group of the issuing client */
    public ServerGroup getServerGroup() {
        return client.getServerGroup();
    }

    /** @return the issuing EVCache client */
    public EVCacheClient getEVCacheClient() {
        return client;
    }

    /** Registers a completion listener; returns this for chaining. */
    public EVCacheOperationFuture<T> addListener(EVCacheGetOperationListener<T> listener) {
        super.addToListeners(listener);
        return this;
    }

    /** Removes a previously registered listener; returns this for chaining. */
    public EVCacheOperationFuture<T> removeListener(EVCacheGetOperationListener<T> listener) {
        super.removeFromListeners(listener);
        return this;
    }

    /**
     * Get the results of the given operation.
     *
     * As with the Future interface, this call will block until the results of
     * the future operation has been received.
     *
     * Note: If we detect there was GC pause and our operation was caught in
     * between we wait again to see if we will be successful. This is effective
     * as the timeout we specify is very low.
     *
     * @param duration
     *            amount of time to wait
     * @param units
     *            unit of time to wait
     * @param throwException
     *            if an exception needs to be thrown (vs null returned) on a failed operation
     * @param hasZF
     *            whether zone fallback is available (suppresses exception propagation)
     * @return the operation results of this OperationFuture
     * @throws InterruptedException
     * @throws TimeoutException
     * @throws ExecutionException
     */
    public T get(long duration, TimeUnit units, boolean throwException, boolean hasZF) throws InterruptedException, TimeoutException, ExecutionException {
        boolean status = latch.await(duration, units);
        if (!status) {
            // First wait timed out — re-check after accounting for a possible GC pause.
            status = handleGCPauseForGet(duration, units, throwException, hasZF);
        }
        if (status) MemcachedConnection.opSucceeded(op);// continuous timeout counter will be reset
        return objRef.get();
    }

    /**
     * Retries the latch wait once after an initial timeout, on the theory that
     * the timeout may have been caused by a GC pause rather than a slow server.
     * Records a pause metric either way, and on a second timeout marks the op
     * timed out and (when throwException && !hasZF) throws the mapped exception.
     *
     * @return true if the operation completed during the second wait
     */
    private boolean handleGCPauseForGet(long duration, TimeUnit units, boolean throwException, boolean hasZF) throws InterruptedException, ExecutionException {
        boolean status;
        boolean gcPause = false;
        final RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
        final long vmStartTime = runtimeBean.getStartTime();
        final List<GarbageCollectorMXBean> gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
        // Inspect each collector's last GC; a GC that started after this future was
        // created means our wait overlapped a pause.
        for (GarbageCollectorMXBean gcMXBean : gcMXBeans) {
            if (gcMXBean instanceof com.sun.management.GarbageCollectorMXBean) {
                final GcInfo lastGcInfo = ((com.sun.management.GarbageCollectorMXBean) gcMXBean).getLastGcInfo();

                // If no GCs, there was no pause due to GC.
                if (lastGcInfo == null) {
                    continue;
                }

                // GcInfo start time is relative to JVM start; convert to wall clock.
                final long gcStartTime = lastGcInfo.getStartTime() + vmStartTime;
                if (gcStartTime > start) {
                    gcPause = true;
                    final long gcDuration = lastGcInfo.getDuration();
                    final long pauseDuration = System.currentTimeMillis() - gcStartTime;
                    if (log.isDebugEnabled()) {
                        log.debug("Event Start Time = " + start
                                + "; Last GC Start Time = " + gcStartTime + "; " + (gcStartTime - start)
                                + " msec ago.\n" + "\nTotal pause duration due for this event = " + pauseDuration
                                + " msec.\nTotal GC duration = " + gcDuration + " msec.");
                    }
                    break;
                }
            }
        }

        if (!gcPause && log.isDebugEnabled()) {
            log.debug("Total pause duration due to NON-GC event = " + (System.currentTimeMillis() - start) + " msec.");
        }

        // redo the same op once more since there was a chance of gc pause
        status = latch.await(duration, units);
        if (log.isDebugEnabled()) log.debug("re-await status : " + status);

        String statusString = EVCacheMetricsFactory.SUCCESS;
        final long pauseDuration = System.currentTimeMillis() - start;
        if (op != null && !status) {
            // whenever timeout occurs, continuous timeout counter will increase by 1.
            MemcachedConnection.opTimedOut(op);
            op.timeOut();
            ExecutionException t = null;
            // Exceptions are only surfaced when the caller asked for them AND no
            // zone-fallback exists to mask the failure.
            if(throwException && !hasZF) {
                if (op.isTimedOut()) {
                    t = new ExecutionException(new CheckedOperationTimeoutException("Checked Operation timed out.", op));
                    statusString = EVCacheMetricsFactory.CHECKED_OP_TIMEOUT;
                } else if (op.isCancelled() && throwException) {
                    t = new ExecutionException(new CancellationException("Cancelled"));statusString = EVCacheMetricsFactory.CANCELLED;
                } else if (op.hasErrored() ) {
                    t = new ExecutionException(op.getException());statusString = EVCacheMetricsFactory.ERROR;
                }
            }
            if(t != null) throw t; //finally throw the exception if needed
        }

        // Record the pause metric regardless of outcome, tagged with cause and result.
        final List<Tag> tagList = new ArrayList<Tag>(client.getTagList().size() + 4);
        tagList.addAll(client.getTagList());
        tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.GET_OPERATION));
        tagList.add(new BasicTag(EVCacheMetricsFactory.PAUSE_REASON, gcPause ? EVCacheMetricsFactory.GC:EVCacheMetricsFactory.SCHEDULE));
        tagList.add(new BasicTag(EVCacheMetricsFactory.FETCH_AFTER_PAUSE, status ? EVCacheMetricsFactory.YES:EVCacheMetricsFactory.NO));
        tagList.add(new BasicTag(EVCacheMetricsFactory.OPERATION_STATUS, statusString));
        EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_PAUSE, tagList, Duration.ofMillis(EVCacheConfig.getInstance().getPropertyRepository().get(getApp() + ".max.write.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(50).get().intValue())).record(pauseDuration, TimeUnit.MILLISECONDS);
        return status;
    }

    /**
     * Wraps this future as an rx {@link Single} that emits the result (or the
     * error) when the operation's completion listener fires.
     */
    public Single<T> observe() {
        return Single.create(subscriber ->
            addListener((EVCacheGetOperationListener<T>) future -> {
                try {
                    subscriber.onSuccess(get());
                } catch (Throwable e) {
                    subscriber.onError(e);
                }
            })
        );
    }

    /**
     * Arms a timeout on {@code future}: if it has not completed after
     * {@code timeout}, it is completed exceptionally with {@link TimeoutException}.
     * The timeout is split into up to 5 slots so that a single GC pause spanning
     * one slot does not immediately fail the whole wait.
     */
    static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future, long timeout, TimeUnit unit) {
        int timeoutSlots = getTimeoutSlots((int) timeout);
        // [DABP-2005] split timeout to timeoutSlots slots to not timeout during GC.
        long splitTimeout = Math.max(1, timeout / timeoutSlots);
        CompletableFuture<Void> chain = CompletableFuture.completedFuture(null);
        for (int i = 0; i < timeoutSlots; i++) {
            final int j = i;
            // Each stage waits one slot; only the final slot actually times the future out.
            chain = chain.thenCompose(unused -> getNext(future, j, timeout, splitTimeout, unit, timeoutSlots));
        }
        return future;
    }

    /**
     * Number of timeout slots: 1 for timeouts under 10, timeout/10 for 10-49,
     * capped at 5 for anything larger.
     * NOTE(review): the computation treats {@code timeout} as a raw number and
     * ignores its TimeUnit — presumably callers pass milliseconds; confirm.
     */
    private static int getTimeoutSlots(int timeout) {
        if(log.isDebugEnabled()) log.debug("Timeout is {}", timeout);
        int timeoutSlots;
        int val = timeout /10;
        if (val == 0 ) {
            timeoutSlots = 1;
        } else if (val >= 1 && val < 5) {
            timeoutSlots = val;
        } else {
            timeoutSlots = 5;
        }
        if(log.isDebugEnabled()) log.debug("timeoutSlots is {}", timeoutSlots);
        return timeoutSlots;
    }

    /**
     * Schedules one slot of the split timeout. Intermediate slots simply pass
     * control to the next stage after {@code splitTimeout}; the last slot fails
     * {@code future} with a TimeoutException if it is still incomplete.
     */
    private static<T> CompletableFuture<Void> getNext(CompletableFuture<T> future, final int j, long timeout, long splitTimeout, TimeUnit unit, int timeoutSlots) {
        CompletableFuture<Void> next = new CompletableFuture<>();
        if (future.isDone()) {
            next.complete(null);
        } else {
            ScheduledFuture<?> scheduledTimeout;
            if (j < timeoutSlots - 1) {
                // NOTE(review): intermediate slots schedule in MILLISECONDS while the
                // final slot uses the caller's `unit`; if unit != MILLISECONDS the slot
                // durations disagree — confirm intended.
                scheduledTimeout =
                    LazySharedExecutor.executor.schedule(
                        () -> {
                            if(log.isDebugEnabled()) log.debug("Completing now for loop {} and timeout slot {}", j, timeoutSlots);
                            next.complete(null);
                        },
                        splitTimeout,
                        TimeUnit.MILLISECONDS);
            } else {
                scheduledTimeout =
                    LazySharedExecutor.executor.schedule(
                        () -> {
                            next.complete(null);
                            if (future.isDone()) {
                                return;
                            }
                            // NOTE(review): warn-level message emitted only when debug is
                            // enabled — likely meant log.debug or an unguarded warn.
                            if(log.isDebugEnabled()) log.warn("Throwing timeout exception after {} {} with timeout slot {}", timeout, unit, timeoutSlots);
                            future.completeExceptionally(new TimeoutException("Timeout after " + timeout));
                        },
                        splitTimeout,
                        unit);
            }
            // If the completable future completes normally, don't bother timing it out.
            // Also cleans the ref for GC.
            future.whenComplete(
                (r, exp) -> {
                    if (exp == null) {
                        scheduledTimeout.cancel(false);
                        if(log.isDebugEnabled()) log.debug("completing the future");
                        next.complete(null);
                    }
                });
        }
        return next;
    }

    /**
     * Creates a fresh {@link CompletableFuture} armed with the split timeout.
     *
     * @param timeout overall timeout
     * @param units   unit of {@code timeout}
     */
    public <U> CompletableFuture<U> makeFutureWithTimeout(long timeout, TimeUnit units) {
        final CompletableFuture<U> future = new CompletableFuture<>();
        return withTimeout(future, timeout, units);
    }

    /**
     * Maps the op's failure state to an ExecutionException and rethrows it
     * wrapped in a RuntimeException (used from the async completion path where
     * checked exceptions cannot propagate). Also bumps the continuous-timeout
     * counter for the connection.
     */
    private void handleException() {
        if (log.isDebugEnabled()) log.debug("handling the timeout in handleTimeoutException");
        if (op != null) {
            MemcachedConnection.opTimedOut(op);
            op.timeOut();
            ExecutionException t = null;
            if (op.isTimedOut()) {
                if (log.isDebugEnabled()) log.debug("Checked Operation timed out with operation {}.", op);
                t = new ExecutionException(new CheckedOperationTimeoutException("Checked Operation timed out.", op));
            } else if (op.isCancelled()) {
                if (log.isDebugEnabled()) log.debug("Cancelled with operation {}.", op);
                t = new ExecutionException(new CancellationException("Cancelled"));
            } else if (op.hasErrored()) {
                if (log.isDebugEnabled()) log.debug("Other exception with operation {}.", op);
                t = new ExecutionException(op.getException());
            }
            // NOTE(review): thrown even when none of the branches matched (t == null),
            // producing RuntimeException(null) — confirm this is intended.
            throw new RuntimeException(t);
        }
    }

    /**
     * Asynchronous get: returns a CompletableFuture that completes with the
     * value when the operation's listener fires, or exceptionally (translated
     * through {@link #handleException()}) after the split timeout.
     */
    public CompletableFuture<T> getAsync(long timeout, TimeUnit units) {
        CompletableFuture<T> future = makeFutureWithTimeout(timeout, units);
        doAsyncGet(future);
        return future.handle((data, ex) -> {
            if (ex != null) {
                handleException();
            }
            return data;
        });
    }

    // Bridges the spymemcached listener callback into the CompletableFuture.
    private void doAsyncGet(CompletableFuture<T> cf) {
        EVCacheGetOperationListener<T> listener = future -> {
            try {
                T result = future.get();
                cf.complete(result);
            } catch (Exception t) {
                cf.completeExceptionally(t);
            }
        };
        this.addListener(listener);
    }

    /**
     * Rx variant of {@link #get(long, TimeUnit, boolean, boolean)}: emits the
     * result, or on timeout either errors (throwException) or emits whatever
     * value is currently in the holder (possibly null).
     */
    public Single<T> get(long duration, TimeUnit units, boolean throwException, boolean hasZF, Scheduler scheduler) {
        return observe().timeout(duration, units, Single.create(subscriber -> {
            // whenever timeout occurs, continuous timeout counter will increase by 1.
            // NOTE(review): opTimedOut(op) is invoked before the op != null guard
            // below — confirm opTimedOut tolerates null.
            MemcachedConnection.opTimedOut(op);
            if (op != null) op.timeOut();
            //if (!hasZF) EVCacheMetricsFactory.getCounter(appName, null, serverGroup.getName(), appName + "-get-CheckedOperationTimeout", DataSourceType.COUNTER).increment();
            if (throwException) {
                subscriber.onError(new CheckedOperationTimeoutException("Timed out waiting for operation", op));
            } else {
                if (isCancelled()) {
                    //if (hasZF) EVCacheMetricsFactory.getCounter(appName, null, serverGroup.getName(), appName + "-get-Cancelled", DataSourceType.COUNTER).increment();
                }
                subscriber.onSuccess(objRef.get());
            }
        }), scheduler).doAfterTerminate(new Action0() {
            @Override
            public void call() {
            }
        }
        );
    }

    // Widens visibility so EVCache internals can signal completion.
    public void signalComplete() {
        super.signalComplete();
    }

    /**
     * Cancel this operation, if possible.
     *
     * @param ign not used
     * @deprecated
     * @return true if the operation has not yet been written to the network
     */
    public boolean cancel(boolean ign) {
        // The Exception is created solely to capture the caller's stack in debug logs.
        if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
        return super.cancel(ign);
    }

    /**
     * Cancel this operation, if possible.
     *
     * @return true if the operation has not yet been written to the network
     */
    public boolean cancel() {
        if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
        return super.cancel();
    }

    /** @return wall-clock time (ms) at which this future was created */
    public long getStartTime() {
        return start;
    }
}
4,020
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheAsciiOperationFactory.java
package com.netflix.evcache.operation; import net.spy.memcached.protocol.ascii.AsciiOperationFactory; import net.spy.memcached.protocol.ascii.ExecCmdOperation; import net.spy.memcached.protocol.ascii.ExecCmdOperationImpl; import net.spy.memcached.protocol.ascii.MetaDebugOperation; import net.spy.memcached.protocol.ascii.MetaDebugOperationImpl; import net.spy.memcached.protocol.ascii.MetaGetOperation; import net.spy.memcached.protocol.ascii.MetaGetOperationImpl; import net.spy.memcached.protocol.ascii.MetaArithmeticOperationImpl; import net.spy.memcached.ops.Mutator; import net.spy.memcached.ops.MutatorOperation; import net.spy.memcached.ops.OperationCallback; public class EVCacheAsciiOperationFactory extends AsciiOperationFactory { public MetaDebugOperation metaDebug(String key, MetaDebugOperation.Callback cb) { return new MetaDebugOperationImpl(key, cb); } public MetaGetOperation metaGet(String key, MetaGetOperation.Callback cb) { return new MetaGetOperationImpl(key, cb); } public ExecCmdOperation execCmd(String cmd, ExecCmdOperation.Callback cb) { return new ExecCmdOperationImpl(cmd, cb); } public MutatorOperation mutate(Mutator m, String key, long by, long def, int exp, OperationCallback cb) { return new MetaArithmeticOperationImpl(m, key, by, def, exp, cb); } }
4,021
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheFuture.java
package com.netflix.evcache.operation;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;

/**
 * A {@link Future} decorator that carries EVCache context — key, app name,
 * server group and the issuing client — alongside the delegate future, so that
 * callers and metrics code can attribute the result without extra lookups.
 */
public class EVCacheFuture implements Future<Boolean> {

    private static final Logger log = LoggerFactory.getLogger(EVCacheFuture.class);

    // Delegate carrying the actual operation result.
    private final Future<Boolean> future;
    // Contextual metadata for attribution.
    private final String app;
    private final ServerGroup serverGroup;
    private final String key;
    private final EVCacheClient client;

    /**
     * Creates a future wrapper without an associated client.
     */
    public EVCacheFuture(Future<Boolean> future, String key, String app, ServerGroup serverGroup) {
        this(future, key, app, serverGroup, null);
    }

    /**
     * Creates a future wrapper.
     *
     * @param future      the delegate future
     * @param key         the key the operation targets
     * @param app         the EVCache app name
     * @param serverGroup the server group handling the operation
     * @param client      the issuing client (may be null)
     */
    public EVCacheFuture(Future<Boolean> future, String key, String app, ServerGroup serverGroup, EVCacheClient client) {
        this.future = future;
        this.app = app;
        this.serverGroup = serverGroup;
        this.key = key;
        this.client = client;
    }

    // ---- Future<Boolean> delegation ----

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        // The Exception exists only to capture the caller's stack in debug logs.
        if (log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
        return future.cancel(mayInterruptIfRunning);
    }

    @Override
    public boolean isCancelled() {
        return future.isCancelled();
    }

    @Override
    public boolean isDone() {
        return future.isDone();
    }

    @Override
    public Boolean get() throws InterruptedException, ExecutionException {
        return future.get();
    }

    @Override
    public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        return future.get(timeout, unit);
    }

    // ---- EVCache context accessors ----

    /** @return the wrapped delegate future */
    public Future<Boolean> getFuture() {
        return future;
    }

    /** @return the key the operation targets */
    public String getKey() {
        return key;
    }

    /** @return the EVCache app name */
    public String getApp() {
        return app;
    }

    /** @return the availability zone of the server group */
    public String getZone() {
        return serverGroup.getZone();
    }

    /** @return the server group's name */
    public String getServerGroupName() {
        return serverGroup.getName();
    }

    /** @return the issuing client, or null when not provided */
    public EVCacheClient getEVCacheClient() {
        return client;
    }

    @Override
    public String toString() {
        // Format kept identical to the historical representation (key intentionally omitted).
        StringBuilder sb = new StringBuilder("EVCacheFuture [future=");
        sb.append(future).append(", app=").append(app)
          .append(", ServerGroup=").append(serverGroup)
          .append(", EVCacheClient=").append(client).append(']');
        return sb.toString();
    }
}
4,022
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheBulkGetFuture.java
package com.netflix.evcache.operation;

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.*;

import com.netflix.evcache.EVCacheGetOperationListener;
import net.spy.memcached.internal.BulkGetCompletionListener;
import net.spy.memcached.internal.CheckedOperationTimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.sun.management.GcInfo;

import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.internal.BulkGetFuture;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationState;

import rx.Scheduler;
import rx.Single;

/**
 * Future for handling results from bulk gets.
 *
 * Adds EVCache-specific behavior on top of spymemcached's
 * {@link BulkGetFuture}: partial-result retrieval ({@code getSome}) with
 * GC-pause detection and metrics, plus async ({@link CompletableFuture}) and
 * reactive ({@link Single}) adapters.
 *
 * Not intended for general use.
 *
 * @param <T> type of objects returned from the GETBULK
 */
@SuppressWarnings("restriction")
public class EVCacheBulkGetFuture<T> extends BulkGetFuture<T> {

    private static final Logger log = LoggerFactory.getLogger(EVCacheBulkGetFuture.class);

    // Per-key result futures; keys of this map are the requested cache keys.
    private final Map<String, Future<T>> rvMap;
    // The underlying memcached operations (one per node involved in the bulk get).
    private final Collection<Operation> ops;
    // Counts down as operations complete; awaited by getSome().
    private final CountDownLatch latch;
    // Wall-clock start of the bulk call, used for pause/duration metrics.
    private final long start;
    private final EVCacheClient client;

    public EVCacheBulkGetFuture(Map<String, Future<T>> m, Collection<Operation> getOps, CountDownLatch l, ExecutorService service, EVCacheClient client) {
        super(m, getOps, l, service);
        rvMap = m;
        ops = getOps;
        latch = l;
        this.start = System.currentTimeMillis();
        this.client = client;
    }

    /**
     * Waits up to the given timeout and returns whatever results completed.
     *
     * If the initial wait times out, checks whether a GC event started after
     * {@link #start} (via com.sun.management GC MXBeans) and, if so, retries
     * the wait once — the timeout may have been caused by the pause rather
     * than a slow server. Operations still incomplete after the wait are
     * marked timed-out on the connection; completed ones are marked succeeded.
     *
     * @param to             maximum wait time
     * @param unit           unit of {@code to}
     * @param throwException if true, cancelled/errored operations raise ExecutionException
     * @param hasZF          whether a zone-fallback exists (softens timeout/cancel accounting)
     * @return map of key to value for the operations that completed
     * @throws InterruptedException if the wait is interrupted
     * @throws ExecutionException   on cancelled/errored operations when {@code throwException}
     */
    public Map<String, T> getSome(long to, TimeUnit unit, boolean throwException, boolean hasZF) throws InterruptedException, ExecutionException {
        boolean status = latch.await(to, unit);
        if(log.isDebugEnabled()) log.debug("Took " + (System.currentTimeMillis() - start)+ " to fetch " + rvMap.size() + " keys from " + client);
        long pauseDuration = -1;  // stays -1 (no metric) when the first await succeeded
        List<Tag> tagList = null;
        Collection<Operation> timedoutOps = null;
        String statusString = EVCacheMetricsFactory.SUCCESS;
        try {
            if (!status) {
                // First wait timed out: figure out whether a GC pause overlapped the call.
                boolean gcPause = false;
                tagList = new ArrayList<Tag>(7);
                tagList.addAll(client.getTagList());
                tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
                final RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
                final long vmStartTime = runtimeBean.getStartTime();
                final List<GarbageCollectorMXBean> gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
                for (GarbageCollectorMXBean gcMXBean : gcMXBeans) {
                    if (gcMXBean instanceof com.sun.management.GarbageCollectorMXBean) {
                        final GcInfo lastGcInfo = ((com.sun.management.GarbageCollectorMXBean) gcMXBean).getLastGcInfo();
                        // If no GCs, there was no pause.
                        if (lastGcInfo == null) {
                            continue;
                        }
                        // GcInfo start time is relative to VM start; convert to epoch millis.
                        final long gcStartTime = lastGcInfo.getStartTime() + vmStartTime;
                        if (gcStartTime > start) {
                            gcPause = true;
                            if (log.isDebugEnabled()) log.debug("Total duration due to gc event = " + lastGcInfo.getDuration() + " msec.");
                            break;
                        }
                    }
                }
                // redo the same op once more since there was a chance of gc pause
                if (gcPause) {
                    status = latch.await(to, unit);
                    tagList.add(new BasicTag(EVCacheMetricsFactory.PAUSE_REASON, EVCacheMetricsFactory.GC));
                    if (log.isDebugEnabled()) log.debug("Retry status : " + status);
                    if (status) {
                        tagList.add(new BasicTag(EVCacheMetricsFactory.FETCH_AFTER_PAUSE, EVCacheMetricsFactory.YES));
                    } else {
                        tagList.add(new BasicTag(EVCacheMetricsFactory.FETCH_AFTER_PAUSE, EVCacheMetricsFactory.NO));
                    }
                } else {
                    // No GC found; attribute the pause to scheduling delay.
                    tagList.add(new BasicTag(EVCacheMetricsFactory.PAUSE_REASON, EVCacheMetricsFactory.SCHEDULE));
                }
                pauseDuration = System.currentTimeMillis() - start;
                if (log.isDebugEnabled()) log.debug("Total duration due to gc event = " + (System.currentTimeMillis() - start) + " msec.");
            }
            // Settle every operation with the connection: incomplete ops after a
            // failed wait are timed out, everything else is marked succeeded.
            for (Operation op : ops) {
                if (op.getState() != OperationState.COMPLETE) {
                    if (!status) {
                        MemcachedConnection.opTimedOut(op);
                        if(timedoutOps == null) timedoutOps = new HashSet<Operation>();
                        timedoutOps.add(op);
                    } else {
                        MemcachedConnection.opSucceeded(op);
                    }
                } else {
                    MemcachedConnection.opSucceeded(op);
                }
            }
            if (!status && !hasZF && (timedoutOps != null && timedoutOps.size() > 0)) statusString = EVCacheMetricsFactory.TIMEOUT;
            for (Operation op : ops) {
                if(op.isCancelled()) {
                    if (hasZF) statusString = EVCacheMetricsFactory.CANCELLED;
                    if (throwException) throw new ExecutionException(new CancellationException("Cancelled"));
                }
                if (op.hasErrored() && throwException) throw new ExecutionException(op.getException());
            }
            // Collect whatever per-key futures resolved; get() here does not block
            // further since the operations were settled above.
            Map<String, T> m = new HashMap<String, T>();
            for (Map.Entry<String, Future<T>> me : rvMap.entrySet()) {
                m.put(me.getKey(), me.getValue().get());
            }
            return m;
        } finally {
            // Record the pause metric even when an exception is propagating.
            if(pauseDuration > 0) {
                tagList.add(new BasicTag(EVCacheMetricsFactory.OPERATION_STATUS, statusString));
                EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_PAUSE, tagList,
                        Duration.ofMillis(EVCacheConfig.getInstance().getPropertyRepository().get(getApp() + ".max.read.duration.metric", Integer.class)
                                .orElseGet("evcache.max.read.duration.metric").orElse(20).get().intValue())).record(pauseDuration, TimeUnit.MILLISECONDS);
            }
        }
    }

    /**
     * Synchronously runs {@link #getSome} and wraps the outcome (value or
     * exception) in an already-completed {@link CompletableFuture}.
     * NOTE(review): this blocks the calling thread despite the return type.
     */
    public CompletableFuture<Map<String, T>> getSomeCompletableFuture(long to, TimeUnit unit, boolean throwException, boolean hasZF) {
        CompletableFuture<Map<String, T>> completableFuture = new CompletableFuture<>();
        try {
            Map<String, T> value = getSome(to, unit, throwException, hasZF);
            completableFuture.complete(value);
        } catch (Exception e) {
            completableFuture.completeExceptionally(e);
        }
        return completableFuture;
    }

    /** Exposes completion of this future as an RxJava {@link Single}. */
    public Single<Map<String, T>> observe() {
        return Single.create(subscriber ->
            addListener(future -> {
                try {
                    subscriber.onSuccess(get());
                } catch (Throwable e) {
                    subscriber.onError(e);
                }
            })
        );
    }

    /** Builds a CompletableFuture that fails with a timeout after the given delay. */
    public <U> CompletableFuture<U> makeFutureWithTimeout(long timeout, TimeUnit units) {
        final CompletableFuture<U> future = new CompletableFuture<>();
        return EVCacheOperationFuture.withTimeout(future, timeout, units);
    }

    /**
     * Async variant of getSome: completes with the result map, or runs
     * {@link #handleBulkException()} (which rethrows) on error/timeout.
     */
    public CompletableFuture<Map<String, T>> getAsyncSome(long timeout, TimeUnit units) {
        CompletableFuture<Map<String, T>> future = makeFutureWithTimeout(timeout, units);
        doAsyncGetSome(future);
        return future.handle((data, ex) -> {
            if (ex != null) {
                handleBulkException();
            }
            return data;
        });
    }

    /**
     * Settles all operations after an async failure and always throws a
     * RuntimeException wrapping the most representative cause (cancellation,
     * operation error, or checked timeout).
     */
    public void handleBulkException() {
        ExecutionException t = null;
        for (Operation op : ops) {
            if (op.getState() != OperationState.COMPLETE) {
                if (op.isCancelled()) {
                    throw new RuntimeException(new ExecutionException(new CancellationException("Cancelled")));
                } else if (op.hasErrored()) {
                    throw new RuntimeException(new ExecutionException(op.getException()));
                } else {
                    // Neither cancelled nor errored: treat as timeout; keep the last one.
                    op.timeOut();
                    MemcachedConnection.opTimedOut(op);
                    t = new ExecutionException(new CheckedOperationTimeoutException("Checked Operation timed out.", op));
                }
            } else {
                MemcachedConnection.opSucceeded(op);
            }
        }
        throw new RuntimeException(t);
    }

    /** Completes the promise with a copy of the result map once this future finishes. */
    public void doAsyncGetSome(CompletableFuture<Map<String, T>> promise) {
        this.addListener(future -> {
            try {
                Map<String, T> m = new HashMap<>();
                Map<String, ?> result = future.get();
                for (Map.Entry<String, ?> me : result.entrySet()) {
                    m.put(me.getKey(), (T)me.getValue());
                }
                promise.complete(m);
            } catch (Exception t) {
                promise.completeExceptionally(t);
            }
        });
    }

    /**
     * Reactive getSome: observes this future with a timeout; on timeout the
     * fallback Single settles the operations (timing out incomplete ones)
     * and emits whatever results are available, or an error.
     */
    public Single<Map<String, T>> getSome(long to, TimeUnit units, boolean throwException, boolean hasZF, Scheduler scheduler) {
        return observe().timeout(to, units, Single.create(subscriber -> {
            try {
                final Collection<Operation> timedoutOps = new HashSet<Operation>();
                for (Operation op : ops) {
                    if (op.getState() != OperationState.COMPLETE) {
                        MemcachedConnection.opTimedOut(op);
                        timedoutOps.add(op);
                    } else {
                        MemcachedConnection.opSucceeded(op);
                    }
                }
                //if (!hasZF && timedoutOps.size() > 0) EVCacheMetricsFactory.getInstance().increment(client.getAppName() + "-getSome-CheckedOperationTimeout", client.getTagList());
                for (Operation op : ops) {
                    if (op.isCancelled() && throwException) throw new ExecutionException(new CancellationException("Cancelled"));
                    if (op.hasErrored() && throwException) throw new ExecutionException(op.getException());
                }
                Map<String, T> m = new HashMap<String, T>();
                for (Map.Entry<String, Future<T>> me : rvMap.entrySet()) {
                    m.put(me.getKey(), me.getValue().get());
                }
                subscriber.onSuccess(m);
            } catch (Throwable e) {
                subscriber.onError(e);
            }
        }), scheduler);
    }

    /** @return the server group name (historically called "zone"). */
    public String getZone() {
        return client.getServerGroupName();
    }

    public ServerGroup getServerGroup() {
        return client.getServerGroup();
    }

    public String getApp() {
        return client.getAppName();
    }

    /** @return an unmodifiable view of the requested keys. */
    public Set<String> getKeys() {
        return Collections.unmodifiableSet(rvMap.keySet());
    }

    public void signalComplete() {
        super.signalComplete();
    }

    public boolean cancel(boolean ign) {
        // Stack trace in the log helps locate the cancellation source.
        if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
        return super.cancel(ign);
    }

    /** @return epoch millis when this bulk call started. */
    public long getStartTime() {
        return start;
    }
}
4,023
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheLatchImpl.java
package com.netflix.evcache.operation;

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.ipc.IpcStatus;

import net.spy.memcached.internal.ListenableFuture;
import net.spy.memcached.internal.OperationCompletionListener;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.ops.StatusCode;

/**
 * Latch over a set of replica write futures. The latch is released once the
 * number of completed futures reaches the threshold implied by the
 * {@link Policy} (NONE/ONE/QUORUM/ALL_MINUS_1/ALL); it also acts as the
 * {@link OperationCompletionListener} for each future and, when an
 * {@link EVCacheEvent} is attached, drives event-listener callbacks and
 * failure-verification metrics (via {@link #run()}).
 *
 * Not thread-safe beyond what the underlying CountDownLatch provides; the
 * completion counters are updated from spymemcached callback threads.
 */
public class EVCacheLatchImpl implements EVCacheLatch, Runnable {

    private static final Logger log = LoggerFactory.getLogger(EVCacheLatchImpl.class);

    // Number of completions required to release the latch (derived from policy).
    private final int expectedCompleteCount;
    private final CountDownLatch latch;
    // All futures registered via addFuture(); sized to totalFutureCount.
    private final List<Future<Boolean>> futures;
    private final Policy policy;
    // Total number of futures expected to be registered.
    private final int totalFutureCount;
    // Creation time, used for the latch-duration metric.
    private final long start;
    private final String appName;

    private EVCacheEvent evcacheEvent = null;
    // Ensures the event listeners' onComplete fires at most once.
    private boolean onCompleteDone = false;
    private int completeCount = 0;
    private int failureCount = 0;
    // First failure's status code name; reported with the latch metric.
    private String failReason = null;
    private ScheduledFuture<?> scheduledFuture;

    /**
     * @param policy  completion policy determining how many futures must finish
     * @param _count  number of futures that will be added to this latch
     * @param appName EVCache app name (for logs and metrics)
     */
    public EVCacheLatchImpl(Policy policy, int _count, String appName) {
        this.start = System.currentTimeMillis();
        this.policy = policy;
        this.futures = new ArrayList<Future<Boolean>>(_count);
        this.appName = appName;
        this.totalFutureCount = _count;
        this.expectedCompleteCount = policyToCount(policy, _count);
        this.latch = new CountDownLatch(expectedCompleteCount);
        if (log.isDebugEnabled()) log.debug("Number of Futures = " + _count + "; Number of Futures that need to completed for Latch to be released = " + this.expectedCompleteCount);
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#await(long, java.util.concurrent.TimeUnit) */
    @Override
    public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
        if (log.isDebugEnabled()) log.debug("Current Latch Count = " + latch.getCount() + "; await for "+ timeout + " " + unit.name() + " appName : " + appName);
        final long start = log.isDebugEnabled() ? System.currentTimeMillis() : 0;
        final boolean awaitSuccess = latch.await(timeout, unit);
        if (log.isDebugEnabled()) log.debug("await success = " + awaitSuccess + " after " + (System.currentTimeMillis() - start) + " msec." + " appName : " + appName
                + ((evcacheEvent != null) ? " keys : " + evcacheEvent.getEVCacheKeys() : ""));
        return awaitSuccess;
    }

    /**
     * Registers a future with this latch and subscribes to its completion.
     * If the future is already done, count down immediately (the listener
     * may not fire again for it).
     */
    public void addFuture(ListenableFuture<Boolean, OperationCompletionListener> future) {
        future.addListener(this);
        if (future.isDone()) countDown();
        this.futures.add(future);
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#isDone() */
    @Override
    public boolean isDone() {
        if (latch.getCount() == 0) return true;
        return false;
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#countDown() */
    public void countDown() {
        if (log.isDebugEnabled()) log.debug("Current Latch Count = " + latch.getCount() + "; Count Down.");
        latch.countDown();
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getPendingCount() */
    @Override
    public int getPendingCount() {
        if (log.isDebugEnabled()) log.debug("Pending Count = " + latch.getCount());
        return (int) latch.getCount();
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getCompletedCount() */
    @Override
    public int getCompletedCount() {
        if (log.isDebugEnabled()) log.debug("Completed Count = " + completeCount);
        return completeCount;
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getPendingFutures() */
    @Override
    public List<Future<Boolean>> getPendingFutures() {
        final List<Future<Boolean>> returnFutures = new ArrayList<Future<Boolean>>(expectedCompleteCount);
        for (Future<Boolean> future : futures) {
            if (!future.isDone()) {
                returnFutures.add(future);
            }
        }
        return returnFutures;
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getAllFutures() */
    @Override
    public List<Future<Boolean>> getAllFutures() {
        return this.futures;
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getCompletedFutures() */
    @Override
    public List<Future<Boolean>> getCompletedFutures() {
        final List<Future<Boolean>> returnFutures = new ArrayList<Future<Boolean>>(expectedCompleteCount);
        for (Future<Boolean> future : futures) {
            if (future.isDone()) {
                returnFutures.add(future);
            }
        }
        return returnFutures;
    }

    /**
     * Maps a policy to the number of completions needed out of {@code count}
     * futures. Called from the constructor, before any future is registered.
     */
    private int policyToCount(Policy policy, int count) {
        if (policy == null || count == 0) return 0;
        switch (policy) {
        case NONE:
            return 1;
        case ONE:
            return 1;
        case QUORUM:
            if (count <= 2) return 1;
            // BUGFIX: previously used futures.size(), which is always 0 here
            // (this method runs in the constructor before addFuture() is ever
            // called), so QUORUM silently degenerated to 1. Use the declared
            // future count to get a real majority.
            else return (count / 2) + 1;
        case ALL_MINUS_1:
            if (count <= 2) return 1;
            else return count - 1;
        default:
            return count;
        }
    }

    /** Attaches the event used for listener callbacks and failure verification. */
    public void setEVCacheEvent(EVCacheEvent e) {
        this.evcacheEvent = e;
    }

    /**
     * Completion callback for each registered future. Counts the latch down,
     * tracks failures, fires event-listener onComplete once the expected
     * success count is reached, cancels the verification task when everything
     * succeeded, and records the latch-duration metric once all futures finish.
     *
     * @see com.netflix.evcache.operation.EVCacheLatchI#onComplete(net.spy.memcached.internal.OperationFuture)
     */
    @Override
    public void onComplete(OperationFuture<?> future) throws Exception {
        if (log.isDebugEnabled()) log.debug("BEGIN : onComplete - Calling Countdown. Completed Future = " + future + "; App : " + appName);
        countDown();
        completeCount++;
        if(evcacheEvent != null) {
            if (log.isDebugEnabled()) log.debug(";App : " + evcacheEvent.getAppName() + "; Call : " + evcacheEvent.getCall() + "; Keys : " + evcacheEvent.getEVCacheKeys()
                    + "; completeCount : " + completeCount + "; totalFutureCount : " + totalFutureCount +"; failureCount : " + failureCount);
            try {
                // A done future returning FALSE is a failed write; remember the
                // first failure's status code for the metric.
                if(future.isDone() && future.get().equals(Boolean.FALSE)) {
                    failureCount++;
                    if(failReason == null) failReason = EVCacheMetricsFactory.getInstance().getStatusCode(future.getStatus().getStatusCode());
                }
            } catch (Exception e) {
                failureCount++;
                if(failReason == null) failReason = IpcStatus.unexpected_error.name();
                if(log.isDebugEnabled()) log.debug(e.getMessage(), e);
            }
            if(!onCompleteDone && getCompletedCount() >= getExpectedSuccessCount()) {
                if(evcacheEvent.getClients().size() > 0) {
                    // Listener list is shared at pool level; notify once using the first client.
                    for(EVCacheClient client : evcacheEvent.getClients()) {
                        final List<EVCacheEventListener> evcacheEventListenerList = client.getPool().getEVCacheClientPoolManager().getEVCacheEventListeners();
                        for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
                            evcacheEventListener.onComplete(evcacheEvent);
                        }
                        onCompleteDone = true;//This ensures we fire onComplete only once
                        break;
                    }
                }
            }
            if(scheduledFuture != null) {
                final boolean futureCancelled = scheduledFuture.isCancelled();
                if (log.isDebugEnabled()) log.debug("App : " + evcacheEvent.getAppName() + "; Call : " + evcacheEvent.getCall() + "; Keys : " + evcacheEvent.getEVCacheKeys()
                        + "; completeCount : " + completeCount + "; totalFutureCount : " + totalFutureCount +"; failureCount : " + failureCount + "; futureCancelled : " + futureCancelled);
                if(onCompleteDone && !futureCancelled) {
                    if(completeCount == totalFutureCount && failureCount == 0) {
                        // all futures are completed successfully; no need for the delayed check
                        final boolean status = scheduledFuture.cancel(true);
                        run();//TODO: should we reschedule this method to run as part of EVCacheScheduledExecutor instead of running on the callback thread
                        if (log.isDebugEnabled()) log.debug("Cancelled the scheduled task : " + status);
                    }
                }
            }
            if (log.isDebugEnabled()) log.debug("App : " + evcacheEvent.getAppName() + "; Call : " + evcacheEvent.getCall() + "; Keys : " + evcacheEvent.getEVCacheKeys()
                    + "; completeCount : " + completeCount + "; totalFutureCount : " + totalFutureCount +"; failureCount : " + failureCount);
        }
        if(totalFutureCount == completeCount) {
            // Last future just finished: record the end-to-end latch duration.
            final List<Tag> tags = new ArrayList<Tag>(5);
            EVCacheMetricsFactory.getInstance().addAppNameTags(tags, appName);
            if(evcacheEvent != null) tags.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, evcacheEvent.getCall().name()));
            tags.add(new BasicTag(EVCacheMetricsFactory.FAIL_COUNT, String.valueOf(failureCount)));
            tags.add(new BasicTag(EVCacheMetricsFactory.COMPLETE_COUNT, String.valueOf(completeCount)));
            if(failReason != null) tags.add(new BasicTag(EVCacheMetricsFactory.IPC_STATUS, failReason));
            //tags.add(new BasicTag(EVCacheMetricsFactory.OPERATION, EVCacheMetricsFactory.CALLBACK));
            EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_LATCH, tags,
                    Duration.ofMillis(EVCacheConfig.getInstance().getPropertyRepository().get(getAppName() + ".max.write.duration.metric", Integer.class)
                            .orElseGet("evcache.max.write.duration.metric").orElse(50).get().intValue())).record(System.currentTimeMillis()- start, TimeUnit.MILLISECONDS);
        }
        if (log.isDebugEnabled()) log.debug("END : onComplete - Calling Countdown. Completed Future = " + future + "; App : " + appName);
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getFailureCount() */
    @Override
    public int getFailureCount() {
        int fail = 0;
        for (Future<Boolean> future : futures) {
            try {
                if (future.isDone() && future.get().equals(Boolean.FALSE)) {
                    fail++;
                }
            } catch (Exception e) {
                fail++;
                log.error(e.getMessage(), e);
            }
        }
        return fail;
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getExpectedCompleteCount() */
    @Override
    public int getExpectedCompleteCount() {
        return this.expectedCompleteCount;
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getExpectedSuccessCount() */
    @Override
    public int getExpectedSuccessCount() {
        return this.expectedCompleteCount;
    }

    /* (non-Javadoc)
     * @see com.netflix.evcache.operation.EVCacheLatchI#getSuccessCount() */
    @Override
    public int getSuccessCount() {
        int success = 0;
        for (Future<Boolean> future : futures) {
            try {
                if (future.isDone() && future.get().equals(Boolean.TRUE)) {
                    success++;
                }
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
        return success;
    }

    public String getAppName() {
        return appName;
    }

    public Policy getPolicy() {
        return this.policy;
    }

    @Override
    public String toString() {
        // JSON-ish debug representation; the "Excpected" typo is preserved
        // intentionally in case anything parses this output.
        StringBuilder builder = new StringBuilder();
        builder.append("{\"AppName\":\"");
        builder.append(getAppName());
        builder.append("\",\"isDone\":\"");
        builder.append(isDone());
        builder.append("\",\"Pending Count\":\"");
        builder.append(getPendingCount());
        builder.append("\",\"Completed Count\":\"");
        builder.append(getCompletedCount());
        builder.append("\",\"Pending Futures\":\"");
        builder.append(getPendingFutures());
        builder.append("\",\"All Futures\":\"");
        builder.append(getAllFutures());
        builder.append("\",\"Completed Futures\":\"");
        builder.append(getCompletedFutures());
        builder.append("\",\"Failure Count\":\"");
        builder.append(getFailureCount());
        builder.append("\",\"Success Count\":\"");
        builder.append(getSuccessCount());
        builder.append("\",\"Excpected Success Count\":\"");
        builder.append(getExpectedSuccessCount());
        builder.append("\"}");
        return builder.toString();
    }

    @Override
    public int getPendingFutureCount() {
        int count = 0;
        for (Future<Boolean> future : futures) {
            if (!future.isDone()) {
                count++;
            }
        }
        return count;
    }

    @Override
    public int getCompletedFutureCount() {
        int count = 0;
        for (Future<Boolean> future : futures) {
            if (future.isDone()) {
                count++;
            }
        }
        return count;
    }

    /** @return true when no futures were ever registered (fast-fail path). */
    public boolean isFastFailure() {
        return (totalFutureCount == 0);
    }

    /**
     * Verification task (scheduled via {@link #scheduledFutureValidation()}
     * or invoked directly once all futures succeed): re-checks each future,
     * records failed server groups on the event, notifies onError listeners
     * if anything failed, and publishes the verify counter.
     */
    @SuppressWarnings("unchecked")
    @Override
    public void run() {
        if(evcacheEvent != null) {
            int failCount = 0, completeCount = 0;
            for (Future<Boolean> future : futures) {
                boolean fail = false;
                try {
                    if(future.isDone()) {
                        fail = future.get(0, TimeUnit.MILLISECONDS).equals(Boolean.FALSE);
                    } else {
                        // Wait only for the remainder of the scheduled delay.
                        long delayms = 0;
                        if(scheduledFuture != null) {
                            delayms = scheduledFuture.getDelay(TimeUnit.MILLISECONDS);
                        }
                        if(delayms < 0 ) delayms = 0;//making sure wait is not negative. It might be ok but as this is implementation dependent let us stick with 0
                        fail = future.get(delayms, TimeUnit.MILLISECONDS).equals(Boolean.FALSE);
                    }
                } catch (Exception e) {
                    fail = true;
                    if(log.isDebugEnabled()) log.debug(e.getMessage(), e);
                }
                if (fail) {
                    if(future instanceof EVCacheOperationFuture) {
                        final EVCacheOperationFuture<Boolean> evcFuture = (EVCacheOperationFuture<Boolean>)future;
                        final StatusCode code = evcFuture.getStatus().getStatusCode();
                        // NOT_FOUND/EXISTS are expected outcomes for some ops, not infra failures.
                        if(code != StatusCode.SUCCESS && code != StatusCode.ERR_NOT_FOUND && code != StatusCode.ERR_EXISTS) {
                            List<ServerGroup> listOfFailedServerGroups = (List<ServerGroup>) evcacheEvent.getAttribute("FailedServerGroups");
                            if(listOfFailedServerGroups == null) {
                                listOfFailedServerGroups = new ArrayList<ServerGroup>(failCount);
                                evcacheEvent.setAttribute("FailedServerGroups", listOfFailedServerGroups);
                            }
                            listOfFailedServerGroups.add(evcFuture.getServerGroup());
                            failCount++;
                        }
                    } else {
                        failCount++;
                    }
                } else {
                    completeCount++;
                }
            }
            if(log.isDebugEnabled()) log.debug("Fail Count : " + failCount);
            if(failCount > 0) {
                if(evcacheEvent.getClients().size() > 0) {
                    // Listener list is shared at pool level; notify once using the first client.
                    for(EVCacheClient client : evcacheEvent.getClients()) {
                        final List<EVCacheEventListener> evcacheEventListenerList = client.getPool().getEVCacheClientPoolManager().getEVCacheEventListeners();
                        if(log.isDebugEnabled()) log.debug("\nClient : " + client +"\nEvcacheEventListenerList : " + evcacheEventListenerList);
                        for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
                            evcacheEventListener.onError(evcacheEvent, null);
                        }
                        break;
                    }
                }
            }
            final List<Tag> tags = new ArrayList<Tag>(5);
            EVCacheMetricsFactory.getInstance().addAppNameTags(tags, appName);
            if(evcacheEvent != null) tags.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, evcacheEvent.getCall().name()));
            //tags.add(new BasicTag(EVCacheMetricsFactory.OPERATION, EVCacheMetricsFactory.VERIFY));
            tags.add(new BasicTag(EVCacheMetricsFactory.FAIL_COUNT, String.valueOf(failCount)));
            tags.add(new BasicTag(EVCacheMetricsFactory.COMPLETE_COUNT, String.valueOf(completeCount)));
            EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.INTERNAL_LATCH_VERIFY, tags);
        }
    }

    @Override
    public int hashCode() {
        return ((evcacheEvent == null) ? 0 : evcacheEvent.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null) return false;
        if (getClass() != obj.getClass()) return false;
        EVCacheLatchImpl other = (EVCacheLatchImpl) obj;
        if (appName == null) {
            if (other.appName != null) return false;
        } else if (!appName.equals(other.appName)) return false;
        if (evcacheEvent == null) {
            if (other.evcacheEvent != null) return false;
        } else if (!evcacheEvent.equals(other.evcacheEvent)) return false;
        return true;
    }

    public void setScheduledFuture(ScheduledFuture<?> scheduledFuture) {
        this.scheduledFuture = scheduledFuture;
    }

    /**
     * Schedules {@link #run()} to execute after the pool's operation timeout
     * so slow/failed replicas are detected and reported. No-op (with warning)
     * when no event is attached.
     */
    public void scheduledFutureValidation() {
        if(evcacheEvent != null) {
            final EVCacheClientPool pool = evcacheEvent.getEVCacheClientPool();
            final ScheduledFuture<?> scheduledFuture = pool.getEVCacheClientPoolManager().getEVCacheScheduledExecutor().schedule(this, pool.getOperationTimeout().get(), TimeUnit.MILLISECONDS);
            setScheduledFuture(scheduledFuture);
        } else {
            if(log.isWarnEnabled()) log.warn("Future cannot be scheduled as EVCacheEvent is null!");
        }
    }
}
4,024
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheItemMetaData.java
package com.netflix.evcache.operation; /** * <B><u>Meta </u></B> * <br> * The meta debug command is a human readable dump of all available internal * metadata of an item, minus the value.<br> * <br> * <b><i>me &lt;key&gt;r\n</i></b><br> * <br> * <key> means one key string.<br> * <br> * The response looks like:<br> * <br> * <b><i>ME &lt;key&gt; &lt;k&gt;=&lt;v&gt;*\r\nEN\r\n</i></b><br> * <br> * For Ex: <br> * <pre> * me img:bil:360465414627441161 ME img:bil:360465414627441161 exp=-549784 la=55016 cas=0 fetch=yes cls=5 size=237 EN </pre> * <br> * Each of the keys and values are the internal data for the item.<br> * <br> * exp = expiration time<br> * la = time in seconds since last access<br> * cas = CAS ID<br> * fetch = whether an item has been fetched before<br> * cls = slab class id<br> * size = total size in bytes<br> * <br> * @author smadappa * */ public class EVCacheItemMetaData { public long secondsLeftToExpire; public long secondsSinceLastAccess; public long cas; public boolean hasBeenFetchedAfterWrite; public int slabClass; public int sizeInBytes; public EVCacheItemMetaData() { super(); } public void setSecondsLeftToExpire(long secondsLeftToExpire) { this.secondsLeftToExpire = secondsLeftToExpire; } public void setSecondsSinceLastAccess(long secondsSinceLastAccess) { this.secondsSinceLastAccess = secondsSinceLastAccess; } public void setCas(long cas) { this.cas = cas; } public void setHasBeenFetchedAfterWrite(boolean hasBeenFetchedAfterWrite) { this.hasBeenFetchedAfterWrite = hasBeenFetchedAfterWrite; } public void setSlabClass(int slabClass) { this.slabClass = slabClass; } public void setSizeInBytes(int sizeInBytes) { this.sizeInBytes = sizeInBytes; } public long getSecondsLeftToExpire() { return secondsLeftToExpire; } public long getSecondsSinceLastAccess() { return secondsSinceLastAccess; } public long getCas() { return cas; } public boolean isHasBeenFetchedAfterWrite() { return hasBeenFetchedAfterWrite; } public int getSlabClass() { return slabClass; } 
public int getSizeInBytes() { return sizeInBytes; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (int) (cas ^ (cas >>> 32)); result = prime * result + (hasBeenFetchedAfterWrite ? 1231 : 1237); result = prime * result + (int) (secondsLeftToExpire ^ (secondsLeftToExpire >>> 32)); result = prime * result + (int) (secondsSinceLastAccess ^ (secondsSinceLastAccess >>> 32)); result = prime * result + sizeInBytes; result = prime * result + slabClass; return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; EVCacheItemMetaData other = (EVCacheItemMetaData) obj; if (cas != other.cas) return false; if (hasBeenFetchedAfterWrite != other.hasBeenFetchedAfterWrite) return false; if (secondsLeftToExpire != other.secondsLeftToExpire) return false; if (secondsSinceLastAccess != other.secondsSinceLastAccess) return false; if (sizeInBytes != other.sizeInBytes) return false; if (slabClass != other.slabClass) return false; return true; } @Override public String toString() { return "EVCacheItemMetaData [secondsLeftToExpire=" + secondsLeftToExpire + ", secondsSinceLastAccess=" + secondsSinceLastAccess + ", cas=" + cas + ", hasBeenFetchedAfterWrite=" + hasBeenFetchedAfterWrite + ", slabClass=" + slabClass + ", sizeInBytes=" + sizeInBytes + "]"; } }
4,025
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheFutures.java
package com.netflix.evcache.operation;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.pool.ServerGroup;

import net.spy.memcached.internal.ListenableFuture;
import net.spy.memcached.internal.OperationCompletionListener;
import net.spy.memcached.internal.OperationFuture;

/**
 * Aggregates an array of per-connection {@link OperationFuture}s for a single
 * key/server-group into one composite future with AND semantics: the composite
 * is done/cancelled/true only when every underlying future is.
 *
 * <p>It also acts as its own {@link OperationCompletionListener}: each
 * underlying future's completion decrements {@link #completionCounter}, and
 * when the counter hits zero the optional {@link EVCacheLatch} is notified
 * exactly once.
 */
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "DE_MIGHT_IGNORE", "EI_EXPOSE_REP2" })
public class EVCacheFutures implements ListenableFuture<Boolean, OperationCompletionListener>, OperationCompletionListener {

    private static final Logger log = LoggerFactory.getLogger(EVCacheFutures.class);
    // Underlying futures; the array reference is shared with the caller (EI_EXPOSE_REP2 suppressed above).
    private final OperationFuture<Boolean>[] futures;
    private final String app;
    private final ServerGroup serverGroup;
    private final String key;
    // Counts futures still outstanding; starts at futures.length and is
    // decremented once per completion in onComplete().
    private final AtomicInteger completionCounter;
    // Optional latch notified when the last future completes; may be null.
    private final EVCacheLatch latch;

    /**
     * Wires this composite up to every underlying future.
     *
     * @param futures     the per-connection futures to aggregate
     * @param key         the cache key these futures operate on
     * @param app         the EVCache app name (for reporting)
     * @param serverGroup the server group the operations were sent to
     * @param latch       optional latch to notify on full completion; may be null
     */
    public EVCacheFutures(OperationFuture<Boolean>[] futures, String key, String app, ServerGroup serverGroup, EVCacheLatch latch) {
        this.futures = futures;
        this.app = app;
        this.serverGroup = serverGroup;
        this.key = key;
        this.latch = latch;
        this.completionCounter = new AtomicInteger(futures.length);
        if (latch != null && latch instanceof EVCacheLatchImpl) ((EVCacheLatchImpl) latch).addFuture(this);
        for (int i = 0; i < futures.length; i++) {
            final OperationFuture<Boolean> of = futures[i];
            if (of.isDone()) {
                // Already-finished futures never fire a listener, so count them
                // down immediately; exceptions are deliberately ignored
                // (DE_MIGHT_IGNORE) to keep wiring up the remaining futures.
                try {
                    onComplete(of);
                } catch (Exception e) {
                }
            } else {
                of.addListener(this);
            }
        }
    }

    /**
     * Cancels every underlying future and always reports success.
     * NOTE(review): {@code mayInterruptIfRunning} is ignored and the deprecated
     * no-arg {@code cancel()} is used — confirm this is intentional.
     */
    public boolean cancel(boolean mayInterruptIfRunning) {
        if (log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
        for (OperationFuture<Boolean> future : futures) {
            future.cancel();
        }
        return true;
    }

    /** True only when every underlying future is cancelled. */
    @Override
    public boolean isCancelled() {
        for (OperationFuture<Boolean> future : futures) {
            if (future.isCancelled() == false) return false;
        }
        return true;
    }

    /** True only when every underlying future is done. */
    @Override
    public boolean isDone() {
        for (OperationFuture<Boolean> future : futures) {
            if (future.isDone() == false) return false;
        }
        return true;
    }

    /** Blocks on each future in turn; false if any one of them returned false. */
    @Override
    public Boolean get() throws InterruptedException, ExecutionException {
        for (OperationFuture<Boolean> future : futures) {
            if (future.get() == false) return false;
        }
        return true;
    }

    /**
     * Like {@link #get()} but with a per-future timeout.
     * NOTE(review): the timeout is applied to each future independently, so
     * the total wait can be up to {@code futures.length * timeout}.
     */
    @Override
    public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        for (OperationFuture<Boolean> future : futures) {
            if (future.get(timeout, unit) == false) return false;
        }
        return true;
    }

    /** @return the cache key these futures operate on */
    public String getKey() {
        return key;
    }

    /** @return the EVCache app name */
    public String getApp() {
        return app;
    }

    /** @return the availability zone of the target server group */
    public String getZone() {
        return serverGroup.getZone();
    }

    /** @return the target server group's name */
    public String getServerGroupName() {
        return serverGroup.getName();
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder();
        sb.append("EVCacheFutures [futures=[");
        for (OperationFuture<Boolean> future : futures) sb.append(future);
        sb.append("], app=").append(app).append(", ServerGroup=").append(serverGroup.toString()).append("]");
        return sb.toString();
    }

    /**
     * Listener callback from each underlying future; fires the latch exactly
     * once, when the final outstanding future completes.
     */
    @Override
    public void onComplete(OperationFuture<?> future) throws Exception {
        int val = completionCounter.decrementAndGet();
        if (val == 0) {
            if (latch != null) latch.onComplete(future); // Pass the last future to get completed
        }
    }

    /** No-op: completion is routed through the constructor wiring, not external listeners. */
    @Override
    public Future<Boolean> addListener(OperationCompletionListener listener) {
        return this;
    }

    /** No-op: listeners are never stored, so there is nothing to remove. */
    @Override
    public Future<Boolean> removeListener(OperationCompletionListener listener) {
        return this;
    }
}
4,026
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/metrics/EVCacheMetricsFactory.java
package com.netflix.evcache.metrics; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.StringTokenizer; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; import com.netflix.evcache.util.EVCacheConfig; import com.netflix.spectator.api.BasicTag; import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.DistributionSummary; import com.netflix.spectator.api.Id; import com.netflix.spectator.api.Registry; import com.netflix.spectator.api.Spectator; import com.netflix.spectator.api.Tag; import com.netflix.spectator.api.Timer; import com.netflix.spectator.api.histogram.PercentileTimer; import com.netflix.spectator.ipc.IpcStatus; import net.spy.memcached.ops.StatusCode; @SuppressWarnings("deprecation") @edu.umd.cs.findbugs.annotations.SuppressFBWarnings(value = { "NF_LOCAL_FAST_PROPERTY", "PMB_POSSIBLE_MEMORY_BLOAT" }, justification = "Creates only when needed") public final class EVCacheMetricsFactory { private final Map<String, Number> monitorMap = new ConcurrentHashMap<String, Number>(); private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>(); private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>(); private final Lock writeLock = (new ReentrantReadWriteLock()).writeLock(); private final Map<String, Timer> timerMap = new HashMap<String, Timer>(); private static final EVCacheMetricsFactory INSTANCE = new EVCacheMetricsFactory(); private EVCacheMetricsFactory() { } public static EVCacheMetricsFactory getInstance() { return INSTANCE; } public Map<String, Counter> getAllCounters() { return counterMap; } public Map<String, Timer> getAllTimers() { return timerMap; } public Map<String, Number> 
getAllMonitor() { return monitorMap; } public Map<String, DistributionSummary> getAllDistributionSummaryMap() { return distributionSummaryMap; } public Registry getRegistry() { return Spectator.globalRegistry(); } public AtomicLong getLongGauge(String name) { return getLongGauge(name, null); } public AtomicLong getLongGauge(String cName, Collection<Tag> tags) { final String name = tags != null ? cName + tags.toString() : cName; AtomicLong gauge = (AtomicLong)monitorMap.get(name); if (gauge == null) { writeLock.lock(); try { if (monitorMap.containsKey(name)) { gauge = (AtomicLong)monitorMap.get(name); } else { if(tags != null) { final Id id = getId(cName, tags); gauge = getRegistry().gauge(id, new AtomicLong()); } else { final Id id = getId(cName, null); gauge = getRegistry().gauge(id, new AtomicLong()); } monitorMap.put(name, gauge); } } finally { writeLock.unlock(); } } return gauge; } private void addCommonTags(List<Tag> tagList) { tagList.add(new BasicTag(OWNER, "evcache")); final String additionalTags = EVCacheConfig.getInstance().getPropertyRepository().get("evcache.additional.tags", String.class).orElse(null).get(); if(additionalTags != null && additionalTags.length() > 0) { final StringTokenizer st = new StringTokenizer(additionalTags, ","); while(st.hasMoreTokens()) { final String token = st.nextToken().trim(); String val = System.getProperty(token); if(val == null) val = System.getenv(token); if(val != null) tagList.add(new BasicTag(token, val)); } } } public void addAppNameTags(List<Tag> tagList, String appName) { tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE, appName)); tagList.add(new BasicTag(EVCacheMetricsFactory.ID, appName)); } public Id getId(String name, Collection<Tag> tags) { final List<Tag> tagList = new ArrayList<Tag>(); if(tags != null) tagList.addAll(tags); addCommonTags(tagList); return getRegistry().createId(name, tagList); } public Counter getCounter(String cName, Collection<Tag> tags) { final String name = tags != null ? 
cName + tags.toString() : cName; Counter counter = counterMap.get(name); if (counter == null) { writeLock.lock(); try { if (counterMap.containsKey(name)) { counter = counterMap.get(name); } else { List<Tag> tagList = new ArrayList<Tag>(tags.size() + 1); tagList.addAll(tags); final Id id = getId(cName, tagList); counter = getRegistry().counter(id); counterMap.put(name, counter); } } finally { writeLock.unlock(); } } return counter; } public Counter getCounter(String name) { return getCounter(name, null); } public void increment(String name) { final Counter counter = getCounter(name); counter.increment(); } public void increment(String cName, Collection<Tag> tags) { final Counter counter = getCounter(cName, tags); counter.increment(); } @Deprecated public Timer getPercentileTimer(String metric, Collection<Tag> tags) { return getPercentileTimer(metric, tags, Duration.ofMillis(100)); } public Timer getPercentileTimer(String metric, Collection<Tag> tags, Duration max) { final String name = tags != null ? metric + tags.toString() : metric; final Timer duration = timerMap.get(name); if (duration != null) return duration; writeLock.lock(); try { if (timerMap.containsKey(name)) return timerMap.get(name); else { Id id = getId(metric, tags); final Timer _duration = PercentileTimer.builder(getRegistry()).withId(id).withRange(Duration.ofNanos(100000), max).build(); timerMap.put(name, _duration); return _duration; } } finally { writeLock.unlock(); } } public DistributionSummary getDistributionSummary(String name, Collection<Tag> tags) { final String metricName = (tags != null ) ? 
name + tags.toString() : name; final DistributionSummary _ds = distributionSummaryMap.get(metricName); if(_ds != null) return _ds; final Registry registry = Spectator.globalRegistry(); if (registry != null) { Id id = getId(name, tags); final DistributionSummary ds = registry.distributionSummary(id); distributionSummaryMap.put(metricName, ds); return ds; } return null; } public String getStatusCode(StatusCode sc) { switch(sc) { case CANCELLED : return IpcStatus.cancelled.name(); case TIMEDOUT : return IpcStatus.timeout.name(); case INTERRUPTED : return EVCacheMetricsFactory.INTERRUPTED; case SUCCESS : return IpcStatus.success.name(); case ERR_NOT_FOUND: return "not_found"; case ERR_EXISTS: return "exists"; case ERR_2BIG: return "too_big"; case ERR_INVAL: return "invalid"; case ERR_NOT_STORED: return "not_stored"; case ERR_DELTA_BADVAL: return "bad_value"; case ERR_NOT_MY_VBUCKET: return "not_my_vbucket"; case ERR_UNKNOWN_COMMAND: return "unknown_command"; case ERR_NO_MEM: return "no_mem"; case ERR_NOT_SUPPORTED: return "not_supported"; case ERR_INTERNAL: return "error_internal"; case ERR_BUSY: return "error_busy"; case ERR_TEMP_FAIL: return "temp_failure"; case ERR_CLIENT : return "error_client"; default : return sc.name().toLowerCase(); } } /** * External Metric Names */ public static final String OVERALL_CALL = "evcache.client.call"; public static final String OVERALL_KEYS_SIZE = "evcache.client.call.keys.size"; public static final String COMPRESSION_RATIO = "evcache.client.compression.ratio"; /** * External IPC Metric Names */ public static final String IPC_CALL = "ipc.client.call"; public static final String IPC_SIZE_INBOUND = "ipc.client.call.size.inbound"; public static final String IPC_SIZE_OUTBOUND = "ipc.client.call.size.outbound"; public static final String OWNER = "owner"; public static final String ID = "id"; /** * Internal Metric Names */ public static final String CONFIG = "internal.evc.client.config"; public static final String DATA_SIZE = 
"internal.evc.client.datasize"; public static final String IN_MEMORY = "internal.evc.client.inmemorycache"; public static final String FAST_FAIL = "internal.evc.client.fastfail"; public static final String INTERNAL_OPERATION = "internal.evc.client.operation"; public static final String INTERNAL_PAUSE = "internal.evc.client.pause"; public static final String INTERNAL_LATCH = "internal.evc.client.latch"; public static final String INTERNAL_LATCH_VERIFY = "internal.evc.client.latch.verify"; public static final String INTERNAL_FAIL = "internal.evc.client.fail"; public static final String INTERNAL_EVENT_FAIL = "internal.evc.client.event.fail"; public static final String INTERNAL_RECONNECT = "internal.evc.client.reconnect"; public static final String INTERNAL_EXECUTOR = "internal.evc.client.executor"; public static final String INTERNAL_EXECUTOR_SCHEDULED = "internal.evc.client.scheduledExecutor"; public static final String INTERNAL_POOL_INIT_ERROR = "internal.evc.client.init.error"; public static final String INTERNAL_NUM_CHUNK_SIZE = "internal.evc.client.chunking.numOfChunks"; public static final String INTERNAL_CHUNK_DATA_SIZE = "internal.evc.client.chunking.dataSize"; public static final String INTERNAL_ADD_CALL_FIXUP = "internal.evc.client.addCall.fixUp"; public static final String INTERNAL_POOL_SG_CONFIG = "internal.evc.client.pool.asg.config"; public static final String INTERNAL_POOL_CONFIG = "internal.evc.client.pool.config"; public static final String INTERNAL_POOL_REFRESH = "internal.evc.client.pool.refresh"; public static final String INTERNAL_BOOTSTRAP_EUREKA = "internal.evc.client.pool.bootstrap.eureka"; public static final String INTERNAL_STATS = "internal.evc.client.stats"; public static final String INTERNAL_TTL = "internal.evc.item.ttl"; /* * Internal pool config values */ public static final String POOL_READ_INSTANCES = "readInstances"; public static final String POOL_WRITE_INSTANCES = "writeInstances"; public static final String POOL_RECONCILE = 
"reconcile"; public static final String POOL_CHANGED = "asgChanged"; public static final String POOL_SERVERGROUP_STATUS = "asgStatus"; public static final String POOL_READ_Q_SIZE = "readQueue"; public static final String POOL_WRITE_Q_SIZE = "writeQueue"; public static final String POOL_REFRESH_QUEUE_FULL = "refreshOnQueueFull"; public static final String POOL_REFRESH_ASYNC = "refreshAsync"; public static final String POOL_OPERATIONS = "operations"; /** * Metric Tags Names */ public static final String CACHE = "ipc.server.app"; public static final String SERVERGROUP = "ipc.server.asg"; public static final String ZONE = "ipc.server.zone"; public static final String ATTEMPT = "ipc.attempt"; public static final String IPC_RESULT = "ipc.result"; public static final String IPC_STATUS = "ipc.status"; //public static final String FAIL_REASON = "ipc.error.group"; /* * Metric Tags moved to IPC format */ public static final String CALL_TAG = "evc.call"; public static final String CALL_TYPE_TAG = "evc.call.type"; public static final String CACHE_HIT = "evc.cache.hit"; public static final String CONNECTION_ID = "evc.connection.id"; public static final String TTL = "evc.ttl"; public static final String PAUSE_REASON = "evc.pause.reason"; public static final String LATCH = "evc.latch"; public static final String FAIL_COUNT = "evc.fail.count"; public static final String COMPLETE_COUNT = "evc.complete.count"; public static final String RECONNECT_COUNT = "evc.reconnect.count"; public static final String FETCH_AFTER_PAUSE = "evc.fetch.after.pause"; public static final String FAILED_SERVERGROUP = "evc.failed.asg"; public static final String CONFIG_NAME = "evc.config"; public static final String STAT_NAME = "evc.stat.name"; public static final String FAILED_HOST = "evc.failed.host"; public static final String OPERATION = "evc.operation"; public static final String OPERATION_STATUS = "evc.operation.status"; public static final String NUMBER_OF_ATTEMPTS = "evc.attempts"; public static 
final String NUMBER_OF_KEYS = "evc.keys.count"; public static final String METRIC = "evc.metric"; public static final String FAILURE_REASON = "evc.fail.reason"; public static final String PREFIX = "evc.prefix"; public static final String EVENT = "evc.event"; public static final String EVENT_STAGE = "evc.event.stage"; public static final String CONNECTION = "evc.connection.type"; public static final String TLS = "evc.connection.tls"; public static final String COMPRESSION_TYPE = "evc.compression.type"; /** * Metric Tags Values */ public static final String SIZE = "size"; public static final String PORT = "port"; public static final String CONNECT = "connect"; public static final String DISCONNECT = "disconnect"; public static final String SUCCESS = "success"; public static final String FAIL = "failure"; public static final String TIMEOUT = "timeout"; public static final String CHECKED_OP_TIMEOUT = "CheckedOperationTimeout"; public static final String CANCELLED = "cancelled"; public static final String THROTTLED = "throttled"; public static final String ERROR = "error"; public static final String READ = "read"; public static final String WRITE = "write"; public static final String YES = "yes"; public static final String NO = "no"; public static final String PARTIAL = "partial"; public static final String UNKNOWN = "unknown"; public static final String INTERRUPTED = "interrupted"; public static final String SCHEDULE = "Scheduling"; public static final String GC = "gc"; public static final String NULL_CLIENT = "nullClient"; public static final String INVALID_TTL = "invalidTTL"; public static final String NULL_ZONE = "nullZone"; public static final String NULL_SERVERGROUP = "nullASG"; public static final String RECONNECT = "reconnect"; public static final String CALLBACK = "callback"; public static final String VERIFY = "verify"; public static final String READ_QUEUE_FULL = "readQueueFull"; public static final String INACTIVE_NODE = "inactiveNode"; public static final 
String IGNORE_INACTIVE_NODES = "ignoreInactiveNode"; public static final String INCORRECT_CHUNKS = "incorrectNumOfChunks"; public static final String INVALID_CHUNK_SIZE = "invalidChunkSize"; public static final String CHECK_SUM_ERROR = "checkSumError"; public static final String KEY_HASH_COLLISION = "KeyHashCollision"; public static final String NUM_CHUNK_SIZE = "numOfChunks"; public static final String CHUNK_DATA_SIZE = "dataSize"; public static final String NOT_AVAILABLE = "notAvailable"; public static final String NOT_ACTIVE = "notActive"; public static final String WRONG_KEY_RETURNED = "wrongKeyReturned"; public static final String INITIAL = "initial"; public static final String SECOND = "second"; public static final String THIRD_UP = "third_up"; /** * Metric Tag Value for Operations */ public static final String BULK_OPERATION = "BULK"; public static final String GET_OPERATION = "GET"; public static final String GET_AND_TOUCH_OPERATION = "GET_AND_TOUCH"; public static final String DELETE_OPERATION = "DELETE"; public static final String TOUCH_OPERATION = "TOUCH"; public static final String AOA_OPERATION = "APPEND_OR_ADD"; public static final String AOA_OPERATION_APPEND = "APPEND_OR_ADD-APPEND"; public static final String AOA_OPERATION_ADD = "APPEND_OR_ADD-ADD"; public static final String AOA_OPERATION_REAPPEND = "APPEND_OR_ADD-RETRY-APPEND"; public static final String SET_OPERATION = "SET"; public static final String ADD_OPERATION = "ADD"; public static final String REPLACE_OPERATION = "REPLACE"; public static final String META_GET_OPERATION = "M_GET"; public static final String META_SET_OPERATION = "M_SET"; public static final String META_DEBUG_OPERATION = "M_DEBUG"; }
4,027
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/BaseConnectionFactory.java
package com.netflix.evcache.connection;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;

import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheTranscoder;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheNodeLocator;
import com.netflix.evcache.util.EVCacheConfig;

import net.spy.memcached.BinaryConnectionFactory;
import net.spy.memcached.ConnectionObserver;
import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.EVCacheConnection;
import net.spy.memcached.FailureMode;
import net.spy.memcached.HashAlgorithm;
import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.protocol.binary.EVCacheNodeImpl;
import net.spy.memcached.transcoders.Transcoder;

/**
 * spymemcached {@link BinaryConnectionFactory} specialization for the binary
 * memcached protocol. Wires EVCache-specific pieces into the client: a ketama
 * node locator, {@link EVCacheNodeImpl} nodes with monitoring, an
 * {@link EVCacheTranscoder}, and dynamic (Archaius-backed) operation timeout
 * and failure-mode configuration.
 */
public class BaseConnectionFactory extends BinaryConnectionFactory {

    // "<appName>-<serverGroup>-<clientId>" - used as the connection name and toString().
    protected final String name;
    protected final String appName;
    // Dynamic property so the operation timeout can change at runtime.
    protected final Property<Integer> operationTimeout;
    // Max time (ms) an operation may block waiting for queue space.
    protected final long opMaxBlockTime;
    // Set lazily by createLocator(); null until the connection is built.
    protected EVCacheNodeLocator locator;
    // Factory creation time, passed to each node for monitoring.
    protected final long startTime;
    protected final EVCacheClient client;
    // Failure mode, resolved per server group, falling back to per-app, then "Retry".
    protected final Property<String> failureMode;

    /**
     * Package-private: instances are created by {@link ConnectionFactoryBuilder}.
     *
     * @param client            owning EVCache client
     * @param len               operation queue capacity
     * @param _operationTimeout dynamic operation timeout in ms
     * @param opMaxBlockTime    max ms to block when the op queue is full
     */
    BaseConnectionFactory(EVCacheClient client, int len, Property<Integer> _operationTimeout, long opMaxBlockTime) {
        super(len, BinaryConnectionFactory.DEFAULT_READ_BUFFER_SIZE, DefaultHashAlgorithm.KETAMA_HASH);
        this.opMaxBlockTime = opMaxBlockTime;
        this.operationTimeout = _operationTimeout;
        this.client = client;
        this.startTime = System.currentTimeMillis();
        this.appName = client.getAppName();
        // Property chain: "<serverGroup>.failure.mode" -> "<appName>.failure.mode" -> "Retry"
        this.failureMode = client.getPool().getEVCacheClientPoolManager().getEVCacheConfig().getPropertyRepository()
                .get(this.client.getServerGroupName() + ".failure.mode", String.class)
                .orElseGet(appName + ".failure.mode").orElse("Retry");
        this.name = appName + "-" + client.getServerGroupName() + "-" + client.getId();
    }

    /** Creates (and remembers) the ketama-based EVCache node locator. */
    public NodeLocator createLocator(List<MemcachedNode> list) {
        this.locator = new EVCacheNodeLocator(client, list, DefaultHashAlgorithm.KETAMA_HASH, new EVCacheKetamaNodeLocatorConfiguration(client));
        return locator;
    }

    /** @return the locator created by {@link #createLocator(List)}; null before that call */
    public EVCacheNodeLocator getEVCacheNodeLocator() {
        return this.locator;
    }

    public long getMaxReconnectDelay() {
        return super.getMaxReconnectDelay();
    }

    public int getOpQueueLen() {
        return super.getOpQueueLen();
    }

    public int getReadBufSize() {
        return super.getReadBufSize();
    }

    /** Bounded input queue sized by the configured op queue length. */
    public BlockingQueue<Operation> createOperationQueue() {
        return new ArrayBlockingQueue<Operation>(getOpQueueLen());
    }

    /** Creates the EVCache-specific connection that drives I/O for all nodes. */
    public MemcachedConnection createConnection(List<InetSocketAddress> addrs) throws IOException {
        return new EVCacheConnection(name, getReadBufSize(), this, addrs, getInitialObservers(), getFailureMode(), getOperationFactory());
    }

    /** Creates a monitored EVCache node for one memcached server socket. */
    public MemcachedNode createMemcachedNode(SocketAddress sa, SocketChannel c, int bufSize) {
        boolean doAuth = false; // auth is not used on this path
        final EVCacheNodeImpl node = new EVCacheNodeImpl(sa, c, bufSize, createReadOperationQueue(), createWriteOperationQueue(), createOperationQueue(),
                opMaxBlockTime, doAuth, getOperationTimeout(), getAuthWaitTime(), this, client, startTime);
        node.registerMonitors(); // expose JMX/metrics for this node
        return node;
    }

    /** Current value of the dynamic operation timeout property (ms). */
    public long getOperationTimeout() {
        return operationTimeout.get();
    }

    public BlockingQueue<Operation> createReadOperationQueue() {
        return super.createReadOperationQueue();
    }

    public BlockingQueue<Operation> createWriteOperationQueue() {
        return super.createWriteOperationQueue();
    }

    public Transcoder<Object> getDefaultTranscoder() {
        return new EVCacheTranscoder();
    }

    /**
     * Failure mode from dynamic config; an unparseable value falls back to
     * {@link FailureMode#Cancel}.
     */
    public FailureMode getFailureMode() {
        try {
            return FailureMode.valueOf(failureMode.get());
        } catch (IllegalArgumentException ex) {
            return FailureMode.Cancel;
        }
    }

    public HashAlgorithm getHashAlg() {
        return super.getHashAlg();
    }

    public Collection<ConnectionObserver> getInitialObservers() {
        return super.getInitialObservers();
    }

    /** Daemon flag for I/O threads, overridable via "evcache.thread.daemon". */
    public boolean isDaemon() {
        return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.thread.daemon", Boolean.class).orElse(super.isDaemon()).get();
    }

    /** Whether broadcast-operation optimization is enabled (default true). */
    public boolean shouldOptimize() {
        return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.broadcast.base.connection.optimize", Boolean.class).orElse(true).get();
    }

    /** false: listener callbacks run on the shared EVCache executor below. */
    public boolean isDefaultExecutorService() {
        return false;
    }

    public ExecutorService getListenerExecutorService() {
        return client.getPool().getEVCacheClientPoolManager().getEVCacheExecutor();
    }

    public int getId() {
        return client.getId();
    }

    public String getZone() {
        return client.getServerGroup().getZone();
    }

    public String getServerGroupName() {
        return client.getServerGroup().getName();
    }

    /** Same as {@link #getServerGroupName()}; kept for legacy callers. */
    public String getReplicaSetName() {
        return client.getServerGroup().getName();
    }

    public String getAppName() {
        return this.appName;
    }

    public String toString() {
        return name;
    }

    public EVCacheClientPoolManager getEVCacheClientPoolManager() {
        return this.client.getPool().getEVCacheClientPoolManager();
    }

    public EVCacheClientPool getEVCacheClientPool() {
        return this.client.getPool();
    }

    public EVCacheClient getEVCacheClient() {
        return this.client;
    }
}
4,028
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/IConnectionBuilder.java
package com.netflix.evcache.connection;

import com.netflix.evcache.pool.EVCacheClient;

import net.spy.memcached.ConnectionFactory;

/**
 * SPI for supplying the spymemcached {@link ConnectionFactory} used by an
 * {@link EVCacheClient}. Implementations choose the protocol/transport
 * specifics (e.g. binary vs ascii).
 */
public interface IConnectionBuilder {
    /**
     * @param client the EVCache client the factory will serve
     * @return a connection factory configured for that client
     */
    ConnectionFactory getConnectionFactory(EVCacheClient client);
}
4,029
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/BaseAsciiConnectionFactory.java
package com.netflix.evcache.connection;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;

import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheTranscoder;
import com.netflix.evcache.operation.EVCacheAsciiOperationFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheNodeLocator;
import com.netflix.evcache.util.EVCacheConfig;

import net.spy.memcached.ConnectionObserver;
import net.spy.memcached.DefaultConnectionFactory;
import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.EVCacheConnection;
import net.spy.memcached.FailureMode;
import net.spy.memcached.HashAlgorithm;
import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
import net.spy.memcached.OperationFactory;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.protocol.ascii.AsciiOperationFactory;
import net.spy.memcached.protocol.ascii.EVCacheAsciiNodeImpl;
import net.spy.memcached.transcoders.Transcoder;

/**
 * Ascii-protocol counterpart of {@link BaseConnectionFactory}: a spymemcached
 * {@link DefaultConnectionFactory} that wires EVCache-specific pieces into
 * the client (ketama locator, monitored {@link EVCacheAsciiNodeImpl} nodes,
 * {@link EVCacheAsciiOperationFactory}, {@link EVCacheTranscoder}, and
 * dynamic timeout/failure-mode configuration).
 */
public class BaseAsciiConnectionFactory extends DefaultConnectionFactory {

    // "<appName>-<serverGroup>-<clientId>" - used as the connection name and toString().
    protected final String name;
    protected final String appName;
    // Dynamic property so the operation timeout can change at runtime.
    protected final Property<Integer> operationTimeout;
    // Max time (ms) an operation may block waiting for queue space.
    protected final long opMaxBlockTime;
    // Set lazily by createLocator(); null until the connection is built.
    protected EVCacheNodeLocator locator;
    // Factory creation time, passed to each node for monitoring.
    protected final long startTime;
    protected final EVCacheClient client;
    // Failure mode, resolved per server group, falling back to per-app, then "Retry".
    protected final Property<String> failureMode;

    /**
     * Package-private: instances are created by {@link ConnectionFactoryBuilder}.
     *
     * @param client            owning EVCache client
     * @param len               operation queue capacity
     * @param _operationTimeout dynamic operation timeout in ms
     * @param opMaxBlockTime    max ms to block when the op queue is full
     */
    BaseAsciiConnectionFactory(EVCacheClient client, int len, Property<Integer> _operationTimeout, long opMaxBlockTime) {
        super(len, DefaultConnectionFactory.DEFAULT_READ_BUFFER_SIZE, DefaultHashAlgorithm.KETAMA_HASH);
        this.opMaxBlockTime = opMaxBlockTime;
        this.operationTimeout = _operationTimeout;
        this.client = client;
        this.startTime = System.currentTimeMillis();
        this.appName = client.getAppName();
        // Property chain: "<serverGroup>.failure.mode" -> "<appName>.failure.mode" -> "Retry"
        this.failureMode = client.getPool().getEVCacheClientPoolManager().getEVCacheConfig().getPropertyRepository()
                .get(this.client.getServerGroupName() + ".failure.mode", String.class)
                .orElseGet(appName + ".failure.mode").orElse("Retry");
        this.name = appName + "-" + client.getServerGroupName() + "-" + client.getId();
    }

    /** Creates (and remembers) the ketama-based EVCache node locator. */
    public NodeLocator createLocator(List<MemcachedNode> list) {
        this.locator = new EVCacheNodeLocator(client, list, DefaultHashAlgorithm.KETAMA_HASH, new EVCacheKetamaNodeLocatorConfiguration(client));
        return locator;
    }

    /** @return the locator created by {@link #createLocator(List)}; null before that call */
    public EVCacheNodeLocator getEVCacheNodeLocator() {
        return this.locator;
    }

    public long getMaxReconnectDelay() {
        return super.getMaxReconnectDelay();
    }

    public int getOpQueueLen() {
        return super.getOpQueueLen();
    }

    public int getReadBufSize() {
        return super.getReadBufSize();
    }

    /** Bounded input queue sized by the configured op queue length. */
    public BlockingQueue<Operation> createOperationQueue() {
        return new ArrayBlockingQueue<Operation>(getOpQueueLen());
    }

    /** Creates the EVCache-specific connection that drives I/O for all nodes. */
    public MemcachedConnection createConnection(List<InetSocketAddress> addrs) throws IOException {
        return new EVCacheConnection(name, getReadBufSize(), this, addrs, getInitialObservers(), getFailureMode(), getOperationFactory());
    }

    /** EVCache ascii operation factory (adds meta-command support). */
    public EVCacheAsciiOperationFactory getOperationFactory() {
        return new EVCacheAsciiOperationFactory();
    }

    /** Creates a monitored EVCache ascii node for one memcached server socket. */
    public MemcachedNode createMemcachedNode(SocketAddress sa, SocketChannel c, int bufSize) {
        boolean doAuth = false; // auth is not used on this path
        final EVCacheAsciiNodeImpl node = new EVCacheAsciiNodeImpl(sa, c, bufSize, createReadOperationQueue(), createWriteOperationQueue(), createOperationQueue(),
                opMaxBlockTime, doAuth, getOperationTimeout(), getAuthWaitTime(), this, client, startTime);
        node.registerMonitors(); // expose JMX/metrics for this node
        return node;
    }

    /** Current value of the dynamic operation timeout property (ms). */
    public long getOperationTimeout() {
        return operationTimeout.get();
    }

    public BlockingQueue<Operation> createReadOperationQueue() {
        return super.createReadOperationQueue();
    }

    public BlockingQueue<Operation> createWriteOperationQueue() {
        return super.createWriteOperationQueue();
    }

    public Transcoder<Object> getDefaultTranscoder() {
        return new EVCacheTranscoder();
    }

    /**
     * Failure mode from dynamic config; an unparseable value falls back to
     * {@link FailureMode#Cancel}.
     */
    public FailureMode getFailureMode() {
        try {
            return FailureMode.valueOf(failureMode.get());
        } catch (IllegalArgumentException ex) {
            return FailureMode.Cancel;
        }
    }

    public HashAlgorithm getHashAlg() {
        return super.getHashAlg();
    }

    public Collection<ConnectionObserver> getInitialObservers() {
        return super.getInitialObservers();
    }

    /** Daemon flag for I/O threads, overridable via "evcache.thread.daemon". */
    public boolean isDaemon() {
        return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.thread.daemon", Boolean.class).orElse(super.isDaemon()).get();
    }

    /** Whether broadcast-operation optimization is enabled (default true). */
    public boolean shouldOptimize() {
        return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.broadcast.ascii.connection.optimize", Boolean.class).orElse(true).get();
    }

    /** false: listener callbacks run on the shared EVCache executor below. */
    public boolean isDefaultExecutorService() {
        return false;
    }

    public ExecutorService getListenerExecutorService() {
        return client.getPool().getEVCacheClientPoolManager().getEVCacheExecutor();
    }

    public int getId() {
        return client.getId();
    }

    public String getZone() {
        return client.getServerGroup().getZone();
    }

    public String getServerGroupName() {
        return client.getServerGroup().getName();
    }

    /** Same as {@link #getServerGroupName()}; kept for legacy callers. */
    public String getReplicaSetName() {
        return client.getServerGroup().getName();
    }

    public String getAppName() {
        return this.appName;
    }

    public String toString() {
        return name;
    }

    public EVCacheClientPoolManager getEVCacheClientPoolManager() {
        return this.client.getPool().getEVCacheClientPoolManager();
    }

    public EVCacheClientPool getEVCacheClientPool() {
        return this.client.getPool();
    }

    public EVCacheClient getEVCacheClient() {
        return this.client;
    }
}
4,030
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/ConnectionFactoryBuilder.java
package com.netflix.evcache.connection;

import com.netflix.archaius.api.Property;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.util.EVCacheConfig;

import net.spy.memcached.ConnectionFactory;

/**
 * Default {@link IConnectionBuilder}: builds a spymemcached {@link ConnectionFactory}
 * for a client, choosing between the binary and ascii protocol implementations
 * based on the global "evcache.use.binary.protocol" fast property (binary by default).
 */
public class ConnectionFactoryBuilder implements IConnectionBuilder {

    public ConnectionFactoryBuilder() {
    }

    /**
     * Creates the connection factory for the given client. Queue capacity and
     * timeouts are resolved from per-app fast properties; the operation timeout is
     * passed through as a live {@link Property} so later changes take effect.
     */
    public ConnectionFactory getConnectionFactory(EVCacheClient client) {
        final String appName = client.getAppName();
        final EVCacheConfig config = EVCacheConfig.getInstance();

        final int queueCapacity = config.getPropertyRepository()
                .get(appName + ".max.queue.length", Integer.class).orElse(16384).get();
        final Property<Integer> operationTimeout = config.getPropertyRepository()
                .get(appName + ".operation.timeout", Integer.class).orElse(2500);
        final int queueMaxBlockMillis = config.getPropertyRepository()
                .get(appName + ".operation.QueueMaxBlockTime", Integer.class).orElse(10).get();
        final boolean binaryProtocol = config.getPropertyRepository()
                .get("evcache.use.binary.protocol", Boolean.class).orElse(true).get();

        return binaryProtocol
                ? new BaseConnectionFactory(client, queueCapacity, operationTimeout, queueMaxBlockMillis)
                : new BaseAsciiConnectionFactory(client, queueCapacity, operationTimeout, queueMaxBlockMillis);
    }
}
4,031
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/EVCacheConfig.java
package com.netflix.evcache.util; import java.lang.reflect.Type; import java.util.function.Consumer; import java.util.function.Function; import javax.inject.Inject; import com.netflix.archaius.DefaultPropertyFactory; import com.netflix.archaius.api.Property; import com.netflix.archaius.api.PropertyListener; import com.netflix.archaius.api.PropertyRepository; import com.netflix.archaius.api.config.CompositeConfig; import com.netflix.archaius.config.DefaultCompositeConfig; import com.netflix.archaius.config.DefaultSettableConfig; import com.netflix.archaius.config.EnvironmentConfig; import com.netflix.archaius.config.SystemConfig; import com.netflix.evcache.config.EVCachePersistedProperties; public class EVCacheConfig { private static EVCacheConfig INSTANCE; /** * This is an hack, should find a better way to do this **/ private static PropertyRepository propertyRepository; @Inject public EVCacheConfig(PropertyRepository repository) { PropertyRepository _propertyRepository = null; if(repository == null) { try { final CompositeConfig applicationConfig = new DefaultCompositeConfig(true); CompositeConfig remoteLayer = new DefaultCompositeConfig(true); applicationConfig.addConfig("RUNTIME", new DefaultSettableConfig()); applicationConfig.addConfig("REMOTE", remoteLayer); applicationConfig.addConfig("SYSTEM", SystemConfig.INSTANCE); applicationConfig.addConfig("ENVIRONMENT", EnvironmentConfig.INSTANCE); final EVCachePersistedProperties remote = new EVCachePersistedProperties(); remoteLayer.addConfig("remote-1", remote.getPollingDynamicConfig()); _propertyRepository = new DefaultPropertyFactory(applicationConfig); } catch (Exception e) { e.printStackTrace(); _propertyRepository = new DefaultPropertyFactory(new DefaultCompositeConfig()); } } else { _propertyRepository = repository; } propertyRepository = new EVCachePropertyRepository(_propertyRepository); //propertyRepository = _propertyRepository; INSTANCE = this; } private EVCacheConfig() { this(null); } public static 
EVCacheConfig getInstance() { if(INSTANCE == null) new EVCacheConfig(); return INSTANCE; } public PropertyRepository getPropertyRepository() { return propertyRepository; } public static void setPropertyRepository(PropertyRepository repository) { propertyRepository = repository; } class EVCachePropertyRepository implements PropertyRepository { private final PropertyRepository delegate; EVCachePropertyRepository(PropertyRepository delegate) { this.delegate = delegate; } @Override public <T> Property<T> get(String key, Class<T> type) { return new EVCacheProperty<T>(delegate.get(key, type)); } @Override public <T> Property<T> get(String key, Type type) { return new EVCacheProperty<T>(delegate.get(key, type)); } } class EVCacheProperty<T> implements Property<T> { private final Property<T> property; EVCacheProperty(Property<T> prop) { property = prop; } @Override public T get() { return property.get(); } @Override public String getKey() { return property.getKey(); } @Override public void addListener(PropertyListener<T> listener) { // TODO Auto-generated method stub property.addListener(listener); } @Override public void removeListener(PropertyListener<T> listener) { // TODO Auto-generated method stub property.removeListener(listener); } @Override public Subscription onChange(Consumer<T> consumer) { // TODO Auto-generated method stub return property.onChange(consumer); } @Override public Subscription subscribe(Consumer<T> consumer) { // TODO Auto-generated method stub return property.subscribe(consumer); } @Override public Property<T> orElse(T defaultValue) { // TODO Auto-generated method stub return new EVCacheProperty<T>(property.orElse(defaultValue)); } @Override public Property<T> orElseGet(String key) { // TODO Auto-generated method stub return new EVCacheProperty<T>(property.orElseGet(key)); } @Override public <S> Property<S> map(Function<T, S> mapper) { // TODO Auto-generated method stub return property.map(mapper); } @Override public String toString() { return 
"EVCacheProperty [Key=" + getKey() + ",value="+get() + "]"; } } }
4,032
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/Pair.java
package com.netflix.evcache.util; public class Pair<E1, E2> { public E1 first() { return first; } public void setFirst(E1 first) { this.first = first; } public E2 second() { return second; } public void setSecond(E2 second) { this.second = second; } private E1 first; private E2 second; public Pair(E1 first, E2 second) { this.first = first; this.second = second; } }
4,033
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/RetryCount.java
package com.netflix.evcache.util; public class RetryCount { private int retryCount; public RetryCount() { retryCount = 1; } public void incr() { retryCount++; } public int get(){ return retryCount; } }
4,034
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/KeyHasher.java
package com.netflix.evcache.util; import java.util.Arrays; import java.util.Base64; import java.util.Base64.Encoder; import org.apache.log4j.BasicConfigurator; import org.apache.log4j.ConsoleAppender; import org.apache.log4j.Level; import org.apache.log4j.PatternLayout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.github.fzakaria.ascii85.Ascii85; import com.google.common.base.Charsets; import com.google.common.hash.HashCode; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import com.netflix.archaius.api.Property; public class KeyHasher { /** * meta data size * 40 + key + 'item_hdr' size i.e. 40 + keysize + 12 And if client flags are present: 40 + keysize + 4 bytes(for flags) + 12 And if CAS and client flags are present: 40 + keysize + 4 bytes(for flags) + 8(for CAS) + 12 */ public enum HashingAlgorithm { murmur3, adler32, crc32, sha1, sha256, siphash24, md5, NO_HASHING // useful for disabling hashing at client level, while Hashing is enabled at App level } public static HashingAlgorithm getHashingAlgorithmFromString(String algorithmStr) { try { if (null == algorithmStr || algorithmStr.isEmpty()) { return null; } return HashingAlgorithm.valueOf(algorithmStr.toLowerCase()); } catch (IllegalArgumentException ex) { // default to md5 incase of unsupported algorithm return HashingAlgorithm.md5; } } private static final Logger log = LoggerFactory.getLogger(KeyHasher.class); private static final Encoder encoder= Base64.getEncoder().withoutPadding(); public static String getHashedKeyEncoded(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes, Integer maxHashLength) { return getHashedKeyEncoded(key, hashingAlgorithm, maxDigestBytes, maxHashLength, null); } public static String getHashedKeyEncoded(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes, Integer maxHashLength, String baseEncoder) { final long start = System.nanoTime(); byte[] digest = getHashedKey(key, hashingAlgorithm, 
maxDigestBytes); if(log.isDebugEnabled()) { final char[] HEX_ARRAY = "0123456789ABCDEF".toCharArray(); char[] hexChars = new char[digest.length * 2]; for (int j = 0; j < digest.length; j++) { int v = digest[j] & 0xFF; hexChars[j * 2] = HEX_ARRAY[v >>> 4]; hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F]; } log.debug("Key : " + key +"; hex : " + new String(hexChars)); } if(log.isDebugEnabled()) log.debug("Key : " + key +"; digest length : " + digest.length + "; byte Array contents : " + Arrays.toString(digest) ); String hKey = null; if(baseEncoder != null && baseEncoder.equals("ascii85")) { hKey = Ascii85.encode(digest); if(log.isDebugEnabled()) log.debug("Key : " + key +"; Hashed & Ascii85 encoded key : " + hKey + "; Took " + (System.nanoTime() - start) + " nanos"); } else { hKey = encoder.encodeToString(digest); if (null != hKey && maxHashLength != null && maxHashLength > 0 && maxHashLength < hKey.length()) { hKey = hKey.substring(0, maxHashLength); } if(log.isDebugEnabled()) log.debug("Key : " + key +"; Hashed & encoded key : " + hKey + "; Took " + (System.nanoTime() - start) + " nanos"); } return hKey; } public static byte[] getHashedKeyInBytes(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes) { final long start = System.nanoTime(); byte[] digest = getHashedKey(key, hashingAlgorithm, maxDigestBytes); if(log.isDebugEnabled()) log.debug("Key : " + key +"; digest length : " + digest.length + "; byte Array contents : " + Arrays.toString(digest) + "; Took " + (System.nanoTime() - start) + " nanos"); return digest; } private static byte[] getHashedKey(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes) { HashFunction hf = null; switch (hashingAlgorithm) { case murmur3: hf = Hashing.murmur3_128(); break; case adler32: hf = Hashing.adler32(); break; case crc32: hf = Hashing.crc32(); break; case sha1: hf = Hashing.sha1(); break; case sha256: hf = Hashing.sha256(); break; case siphash24: hf = Hashing.sipHash24(); break; case md5: default: 
hf = Hashing.md5(); break; } final HashCode hc = hf.newHasher().putString(key, Charsets.UTF_8).hash(); final byte[] digest = hc.asBytes(); if (maxDigestBytes != null && maxDigestBytes > 0 && maxDigestBytes < digest.length) { return Arrays.copyOfRange(digest, 0, maxDigestBytes); } return digest; } public static void main(String args[]) { BasicConfigurator.resetConfiguration(); BasicConfigurator.configure(new ConsoleAppender(new PatternLayout("%d{HH:mm:ss,SSS} [%t] %p %c %x - %m%n"))); org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG); String key = "MAP_LT:721af5a5-3452-4b62-86fb-5f31ccde8d99_187978153X28X2787347X1601330156682"; System.out.println(getHashedKeyEncoded(key, HashingAlgorithm.murmur3, null, null)); } }
4,035
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/EVCacheBulkDataDto.java
package com.netflix.evcache.util;

import com.netflix.evcache.EVCacheKey;

import java.util.List;
import java.util.Map;

/**
 * Mutable holder pairing the de-canonicalized bulk-read results with the
 * EVCacheKeys the bulk call was issued for.
 */
public class EVCacheBulkDataDto<T> {

    private Map<String, T> decanonicalR;
    private List<EVCacheKey> evcKeys;

    public EVCacheBulkDataDto(Map<String, T> decanonicalR, List<EVCacheKey> evcKeys) {
        this.decanonicalR = decanonicalR;
        this.evcKeys = evcKeys;
    }

    /** @return the de-canonicalized key → value result map. */
    public Map<String, T> getDecanonicalR() {
        return decanonicalR;
    }

    public void setDecanonicalR(Map<String, T> decanonicalR) {
        this.decanonicalR = decanonicalR;
    }

    /** @return the EVCacheKeys the bulk operation was issued for. */
    public List<EVCacheKey> getEvcKeys() {
        return evcKeys;
    }

    public void setEvcKeys(List<EVCacheKey> evcKeys) {
        this.evcKeys = evcKeys;
    }
}
4,036
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/ServerGroupCircularIterator.java
package com.netflix.evcache.util;

import java.util.Iterator;
import java.util.Set;

import com.netflix.evcache.pool.ServerGroup;

/**
 * A circular iterator for ReplicaSets. This ensures that all ReplicaSets are
 * equal number of requests.
 *
 * @author smadappa
 */
public class ServerGroupCircularIterator {

    private Entry<ServerGroup> entry;
    private int size = 0;

    /**
     * Creates an instance of ReplicaSetCircularIterator across all ReplicaSets.
     *
     * @param allReplicaSets Set of all available ReplicaSets.
     */
    public ServerGroupCircularIterator(Set<ServerGroup> allReplicaSets) {
        if (allReplicaSets == null || allReplicaSets.isEmpty()) return;
        Entry<ServerGroup> pEntry = null;
        for (Iterator<ServerGroup> itr = allReplicaSets.iterator(); itr.hasNext();) {
            size++;
            final ServerGroup rSet = itr.next();
            final Entry<ServerGroup> newEntry = new Entry<ServerGroup>(rSet, pEntry);
            if (entry == null) entry = newEntry;
            pEntry = newEntry;
        }
        /*
         * Connect the first and the last entry to form a circular list
         */
        if (pEntry != null) {
            entry.next = pEntry;
        }
    }

    /**
     * Returns the next ReplicaSet which should get the request.
     *
     * @return - the next server group in the iterator. If there are none then
     *         null is returned.
     */
    public ServerGroup next() {
        if (entry == null) return null;
        entry = entry.next;
        return entry.element;
    }

    /**
     * Returns the next ReplicaSet excluding the given ReplicaSet which should
     * get the request.
     *
     * @return - the next server group in the iterator. If there are none then
     *         null is returned.
     */
    public ServerGroup next(ServerGroup ignoreReplicaSet) {
        if (entry == null) return null;
        entry = entry.next;
        if (entry.element.equals(ignoreReplicaSet)) {
            // NOTE: with a single-element ring entry.next == entry, so this
            // still returns the ignored group — presumably a deliberate
            // "better same-group than nothing" fallback; confirm with callers.
            return entry.next.element;
        } else {
            return entry.element;
        }
    }

    public int getSize() {
        return size;
    }

    /**
     * The Entry keeps track of the current element and next element in the
     * list.
     *
     * @author smadappa
     *
     * @param <E>
     */
    static class Entry<E> {
        private E element;
        private Entry<E> next;

        /**
         * Creates an instance of Entry.
         */
        Entry(E element, Entry<E> next) {
            this.element = element;
            this.next = next;
        }
    }

    public String toString() {
        final StringBuilder current = new StringBuilder();
        if (entry != null) {
            // BUG FIX: the previous implementation advanced the shared 'entry'
            // field while building the string, so calling toString() (e.g. from
            // a log statement) had the side effect of moving the iterator.
            // Walk with a local cursor instead; output format is unchanged.
            final Entry<ServerGroup> startEntry = entry;
            current.append(startEntry.element);
            Entry<ServerGroup> cursor = startEntry;
            while (!cursor.next.equals(startEntry)) {
                current.append(",").append(cursor.next.element);
                cursor = cursor.next;
            }
        }
        return "Server Group Iterator : { size=" + getSize() + "; Server Group=" + current.toString() + "}";
    }
}
4,037
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/Sneaky.java
package com.netflix.evcache.util; /** * Sneaky can be used to sneakily throw checked exceptions without actually declaring this in your method's throws clause. * This somewhat contentious ability should be used carefully, of course. */ public class Sneaky { public static RuntimeException sneakyThrow(Throwable t) { if ( t == null ) throw new NullPointerException("t"); Sneaky.<RuntimeException>sneakyThrow0(t); return null; } @SuppressWarnings("unchecked") private static <T extends Throwable> void sneakyThrow0(Throwable t) throws T { throw (T)t; } }
4,038
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/ZoneFallbackIterator.java
/** * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.evcache.util; import java.util.Iterator; import java.util.Set; /** * A Zone Based fallback circular iterator. This ensures that during a fallback * scenario the requests are spread out across all zones evenly. * * @author smadappa */ public class ZoneFallbackIterator { private Entry<String> entry; private int size = 0; /** * Creates an instance of ZoneFallbackIterator given all the zones. * * @param allZones * Set of all available zones. */ public ZoneFallbackIterator(Set<String> allZones) { if (allZones == null || allZones.size() == 0) return; Entry<String> pEntry = null; for (Iterator<String> itr = allZones.iterator(); itr.hasNext();) { size++; final String zone = itr.next(); final Entry<String> newEntry = new Entry<String>(zone, pEntry); if (entry == null) entry = newEntry; pEntry = newEntry; } /* * Connect the first and the last entry to form a circular list */ if (pEntry != null) { entry.next = pEntry; } } /** * Returns the next zone from the set which should get the request. * * @return - the next zone in the iterator. If there are none then null is * returned. */ public String next() { if (entry == null) return null; entry = entry.next; return entry.element; } /** * Returns the next zone from the set excluding the given zone which should * get the request. * * @return - the next zone in the iterator. If there are none then null is * returned. 
*/ public String next(String ignoreZone) { if (entry == null) return null; entry = entry.next; if (entry.element.equals(ignoreZone)) { return entry.next.element; } else { return entry.element; } } public int getSize() { return size; } /** * The Entry keeps track of the current element and next element in the * list. * * @author smadappa * * @param <E> */ static class Entry<E> { private E element; private Entry<E> next; /** * Creates an instance of Entry. */ Entry(E element, Entry<E> next) { this.element = element; this.next = next; } } }
4,039
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/SupplierUtils.java
package com.netflix.evcache.util; import java.util.concurrent.Callable; import java.util.function.Supplier; public final class SupplierUtils { private SupplierUtils() { } public static <T> Supplier<T> wrap(Callable<T> callable) { return () -> { try { return callable.call(); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } }; } }
4,040
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/CircularIterator.java
package com.netflix.evcache.util; import java.lang.reflect.Array; import java.util.Collection; import java.util.Iterator; /** * A circular iterator for ReplicaSets. This ensures that all ReplicaSets are * equal number of requests. * * @author smadappa */ public class CircularIterator<T> { private Entry<T> entry; private int size = 0; /** * Creates an instance of ReplicaSetCircularIterator across all ReplicaSets. * * @param allReplicaSets * Set of all available ReplicaSets. */ public CircularIterator(Collection<T> allReplicaSets) { if (allReplicaSets == null || allReplicaSets.isEmpty()) return; Entry<T> pEntry = null; for (Iterator<T> itr = allReplicaSets.iterator(); itr.hasNext();) { size++; final T rSet = itr.next(); final Entry<T> newEntry = new Entry<T>(rSet, pEntry); if (entry == null) entry = newEntry; pEntry = newEntry; } /* * Connect the first and the last entry to form a circular list */ if (pEntry != null) { entry.next = pEntry; } } /** * Returns the next ReplicaSet which should get the request. * * @return - the next ReplicaSetCircularIterator in the iterator. If there * are none then null is returned. */ public T next() { if (entry == null) return null; entry = entry.next; return entry.element; } /** * Returns the next ReplicaSet excluding the given ReplicaSet which should * get the request. * * @return - the next ReplicaSet in the iterator. If there are none then * null is returned. */ public T next(T ignoreReplicaSet) { if (entry == null) return null; entry = entry.next; if (entry.element.equals(ignoreReplicaSet)) { return entry.next.element; } else { return entry.element; } } public int getSize() { return size; } /** * The Entry keeps track of the current element and next element in the * list. * * @author smadappa * * @param <E> */ static class Entry<E> { private E element; private Entry<E> next; /** * Creates an instance of Entry. 
*/ Entry(E element, Entry<E> next) { this.element = element; this.next = next; } } public String toString() { final StringBuilder current = new StringBuilder(); if (entry != null) { Entry<T> startEntry = entry; if(startEntry.element.getClass().isArray()) { for(int i = 0; i < Array.getLength(startEntry.element); i++) { if(i > 0) current.append(","); current.append("[").append(i).append(", ").append(Array.get(startEntry.element, i).toString()).append("]"); } } else { current.append(startEntry.element); } while (!entry.next.equals(startEntry)) { if(entry.next.element.getClass().isArray()) { for(int i = 0; i < Array.getLength(entry.next.element); i++) { if(i > 0) current.append(","); current.append("[").append(i).append(", ").append(Array.get(entry.next.element, i).toString()).append("]"); } } else { current.append(",[").append(entry.next.element).append("]"); } entry = entry.next; } } return "Server Group Iterator : { size=" + getSize() + "; Server Group=" + current.toString() + "}"; } }
4,041
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/config/EVCachePersistedProperties.java
package com.netflix.evcache.config;

import java.net.URL;
import java.net.URLEncoder;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.archaius.config.PollingDynamicConfig;
import com.netflix.archaius.config.polling.FixedPollingStrategy;
import com.netflix.archaius.persisted2.DefaultPersisted2ClientConfig;
import com.netflix.archaius.persisted2.JsonPersistedV2Reader;
import com.netflix.archaius.persisted2.Persisted2ClientConfig;
import com.netflix.archaius.persisted2.ScopePredicates;
import com.netflix.archaius.persisted2.loader.HTTPStreamLoader;

/**
 * Fetches EVCache fast properties from Netflix's persisted-properties
 * ("platformservice") REST endpoint and exposes them as a polling Archaius
 * config. Scope values (app, env, stack, region, ASG, zone, AMI, instance,
 * cluster) are resolved from system properties / environment variables.
 */
public class EVCachePersistedProperties {

    private static Logger log = LoggerFactory.getLogger(EVCachePersistedProperties.class);

    // Scope dimension names understood by the persisted-properties service.
    private static final String SCOPE_CLUSTER = "cluster";
    private static final String SCOPE_AMI = "ami";
    private static final String SCOPE_ZONE = "zone";
    private static final String SCOPE_ASG = "asg";
    private static final String SCOPE_SERVER_ID = "serverId";
    private static final String SCOPE_REGION = "region";
    private static final String SCOPE_STACK = "stack";
    private static final String SCOPE_ENV = "env";
    private static final String SCOPE_APP_ID = "appId";

    // The live polling config, set by getPollingDynamicConfig().
    private PollingDynamicConfig config;

    public EVCachePersistedProperties() {
    }

    /**
     * Builds the persisted-properties client configuration: service URL, the
     * query scopes used to filter properties server-side, the local scopes used
     * to match returned properties, and their precedence order (most specific —
     * serverId — first).
     */
    private Persisted2ClientConfig getConfig() {
        final String region = System.getProperty("netflix.region", getSystemEnvValue("NETFLIX_REGION", "us-east-1"));
        final String env = System.getProperty("netflix.environment", getSystemEnvValue("NETFLIX_ENVIRONMENT", "test"));
        // Default URL is derived from region+env unless explicitly overridden.
        String url = System.getProperty("platformserviceurl", "http://platformservice."+region+".dyn" + env +".netflix.net:7001/platformservice/REST/v2/properties/jsonFilterprops");
        return new DefaultPersisted2ClientConfig()
            .setEnabled(true)
            .withServiceUrl(url)
            // Query scopes restrict what the service returns; the trailing ""
            // also matches properties with no value set for that dimension.
            .withQueryScope(SCOPE_APP_ID, System.getProperty("netflix.appId", getSystemEnvValue("NETFLIX_APP", "")), "")
            .withQueryScope(SCOPE_ENV, env, "")
            .withQueryScope(SCOPE_STACK, System.getProperty("netflix.stack", getSystemEnvValue("NETFLIX_STACK", "")), "")
            .withQueryScope(SCOPE_REGION, region, "")
            // Local scopes are used client-side to decide which of the returned
            // properties apply to this instance.
            .withScope(SCOPE_APP_ID, System.getProperty("netflix.appId", getSystemEnvValue("NETFLIX_APP", "")))
            .withScope(SCOPE_ENV, env)
            .withScope(SCOPE_STACK, System.getProperty("netflix.stack", getSystemEnvValue("NETFLIX_STACK", "")))
            .withScope(SCOPE_REGION, region)
            .withScope(SCOPE_SERVER_ID, System.getProperty("netflix.serverId", getSystemEnvValue("NETFLIX_INSTANCE_ID", "")))
            .withScope(SCOPE_ASG, System.getProperty("netflix.appinfo.asgName", getSystemEnvValue("NETFLIX_AUTO_SCALE_GROUP", "")))
            .withScope(SCOPE_ZONE, getSystemEnvValue("EC2_AVAILABILITY_ZONE", ""))
            .withScope(SCOPE_AMI, getSystemEnvValue("EC2_AMI_ID", ""))
            .withScope(SCOPE_CLUSTER, getSystemEnvValue("NETFLIX_CLUSTER", ""))
            .withPrioritizedScopes(SCOPE_SERVER_ID, SCOPE_ASG, SCOPE_AMI, SCOPE_CLUSTER, SCOPE_APP_ID, SCOPE_ENV, SCOPE_STACK, SCOPE_ZONE, SCOPE_REGION)
            ;
    }

    // Environment-variable lookup with a default for unset variables.
    private String getSystemEnvValue(String key, String def) {
        final String val = System.getenv(key);
        return val == null ? def : val;
    }

    /**
     * Renders the query scopes as the service's filter expression, e.g.
     * (env='test' or env='') and (region='us-east-1'). Empty value sets are
     * skipped; null values become "is null" and empty strings become =''.
     */
    private String getFilterString(Map<String, Set<String>> scopes) {
        StringBuilder sb = new StringBuilder();
        for (Entry<String, Set<String>> scope : scopes.entrySet()) {
            if (scope.getValue().isEmpty()) continue;
            if (sb.length() > 0) {
                sb.append(" and ");
            }
            sb.append("(");
            boolean first = true;
            for (String value : scope.getValue()) {
                if (!first) {
                    sb.append(" or ");
                } else {
                    first = false;
                }
                sb.append(scope.getKey());
                if (null == value) {
                    sb.append(" is null");
                } else if (value.isEmpty()) {
                    sb.append("=''");
                } else {
                    sb.append("='").append(value).append("'");
                }
            }
            sb.append(")");
        }
        return sb.toString();
    }

    /**
     * Builds the polling config that periodically fetches and re-applies the
     * persisted properties. Returns null when the client config is disabled;
     * any failure during setup is rethrown as a RuntimeException.
     */
    public PollingDynamicConfig getPollingDynamicConfig() {
        try {
            Persisted2ClientConfig clientConfig = getConfig();
            log.info("Remote config : " + clientConfig);
            String url = new StringBuilder()
                .append(clientConfig.getServiceUrl())
                .append("?skipPropsWithExtraScopes=").append(clientConfig.getSkipPropsWithExtraScopes())
                .append("&filter=").append(URLEncoder.encode(getFilterString(clientConfig.getQueryScopes()), "UTF-8"))
                .toString();
            if (clientConfig.isEnabled()) {
                JsonPersistedV2Reader reader = JsonPersistedV2Reader.builder(new HTTPStreamLoader(new URL(url)))
                    .withPath("propertiesList")
                    .withScopes(clientConfig.getPrioritizedScopes())
                    .withPredicate(ScopePredicates.fromMap(clientConfig.getScopes()))
                    .build();
                // Refresh rate is in seconds (from the client config).
                config = new PollingDynamicConfig(reader, new FixedPollingStrategy(clientConfig.getRefreshRate(), TimeUnit.SECONDS));
                return config;
            }
        } catch (Exception e1) {
            throw new RuntimeException(e1);
        }
        return null;
    }
}
4,042
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientPool.java
package com.netflix.evcache.pool; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; import javax.management.MBeanServer; import javax.management.ObjectName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.archaius.api.Property; import com.netflix.evcache.metrics.EVCacheMetricsFactory; import com.netflix.evcache.pool.observer.EVCacheConnectionObserver; import com.netflix.evcache.util.CircularIterator; import com.netflix.evcache.util.EVCacheConfig; import com.netflix.evcache.util.ServerGroupCircularIterator; import com.netflix.spectator.api.BasicTag; import com.netflix.spectator.api.Gauge; import com.netflix.spectator.api.Id; import com.netflix.spectator.api.Tag; import net.spy.memcached.EVCacheNode; import net.spy.memcached.MemcachedNode; @edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "REC_CATCH_EXCEPTION", "MDM_THREAD_YIELD" }) public class EVCacheClientPool implements Runnable, EVCacheClientPoolMBean { private static final Logger log = LoggerFactory.getLogger(EVCacheClientPool.class); private final String _appName; private final String _zone; private final EVCacheClientPoolManager manager; private 
ServerGroupCircularIterator localServerGroupIterator = null; private final Property<Boolean> _zoneAffinity; private final Property<Integer> _poolSize; // Number of MemcachedClients to each cluster private final Property<Integer> _readTimeout; // Timeout for readOperation private final Property<Integer> _bulkReadTimeout; // Timeout for readOperation public static final String DEFAULT_PORT = "11211"; public static final String DEFAULT_SECURE_PORT = "11443"; private final Property<Boolean> _retryAcrossAllReplicas; private long lastReconcileTime = 0; private final Property<Integer> logOperations; private final Property<Set<String>> logOperationCalls; private final Property<Set<String>> cloneWrite; // name of the duet EVCache application, if applicable. private final Property<String> duet; // indicates if duet needs to be primary private final Property<Boolean> duetPrimary; // evCacheClientPool of the duet EVCache application, if applicable. Supports daisy chaining. private EVCacheClientPool duetClientPool; // indicates if this evCacheClientPool is a duet. This property is used to mark EVCacheClients of this pool // as duet if applicable. The duet property on the EVCacheClient is then used to know what kind of key of // EVCacheKey (i.e. 
normal key vs duet key) should be passed to the client private boolean isDuet; private final Property<Integer> _opQueueMaxBlockTime; // Timeout for adding an operation private final Property<Integer> _operationTimeout;// Timeout for write operation private final Property<Integer> _maxReadQueueSize; private final Property<Integer> reconcileInterval; private final Property<Integer> _maxRetries; private final Property<Boolean> _pingServers; private final Property<Boolean> refreshConnectionOnReadQueueFull; private final Property<Integer> refreshConnectionOnReadQueueFullSize; private final ThreadPoolExecutor asyncRefreshExecutor; private final Property<Boolean> _disableAsyncRefresh; private final List<Tag> tagList; // private final Id poolSizeId; //private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>(); private final Map<String, Gauge> gaugeMap = new ConcurrentHashMap<String, Gauge>(); private final ReentrantLock refreshLock = new ReentrantLock(); @SuppressWarnings("serial") private final Map<ServerGroup, Property<Boolean>> writeOnlyFastPropertyMap = new ConcurrentHashMap<ServerGroup, Property<Boolean>>() { @Override public Property<Boolean> get(Object _serverGroup) { final ServerGroup serverGroup = ServerGroup.class.cast(_serverGroup); Property<Boolean> isServerGroupInWriteOnlyMode = super.get(serverGroup); if (isServerGroupInWriteOnlyMode != null) return isServerGroupInWriteOnlyMode; isServerGroupInWriteOnlyMode = EVCacheConfig.getInstance(). getPropertyRepository().get(_appName + "." + serverGroup.getName() + ".EVCacheClientPool.writeOnly", Boolean.class) .orElseGet(_appName + "." 
+ serverGroup.getZone() + ".EVCacheClientPool.writeOnly").orElse(false); put(serverGroup, isServerGroupInWriteOnlyMode); return isServerGroupInWriteOnlyMode; }; }; private final AtomicLong numberOfModOps = new AtomicLong(0); private boolean _shutdown = false; private Map<ServerGroup, List<EVCacheClient>> memcachedInstancesByServerGroup = new ConcurrentHashMap<ServerGroup, List<EVCacheClient>>(); private Map<ServerGroup, List<EVCacheClient>> memcachedReadInstancesByServerGroup = new ConcurrentHashMap<ServerGroup, List<EVCacheClient>>(); private Map<ServerGroup, List<EVCacheClient>> memcachedWriteInstancesByServerGroup = new ConcurrentSkipListMap<ServerGroup, List<EVCacheClient>>(); private final Map<InetSocketAddress, Long> evCacheDiscoveryConnectionLostSet = new ConcurrentHashMap<InetSocketAddress, Long>(); private Map<String, ServerGroupCircularIterator> readServerGroupByZone = new ConcurrentHashMap<String, ServerGroupCircularIterator>(); private ServerGroupCircularIterator memcachedFallbackReadInstances = new ServerGroupCircularIterator(Collections.<ServerGroup> emptySet()); private CircularIterator<EVCacheClient[]> allEVCacheWriteClients = new CircularIterator<EVCacheClient[]>(Collections.<EVCacheClient[]> emptyList()); private final EVCacheNodeList provider; EVCacheClientPool(final String appName, final EVCacheNodeList provider, final ThreadPoolExecutor asyncRefreshExecutor, final EVCacheClientPoolManager manager, boolean isDuet) { this._appName = appName; this.provider = provider; this.asyncRefreshExecutor = asyncRefreshExecutor; this.manager = manager; this.isDuet = isDuet; String ec2Zone = System.getenv("EC2_AVAILABILITY_ZONE"); if (ec2Zone == null) ec2Zone = System.getProperty("EC2_AVAILABILITY_ZONE"); this._zone = (ec2Zone == null) ? 
"GLOBAL" : ec2Zone; final EVCacheConfig config = EVCacheConfig.getInstance(); final Consumer<Integer> callback = t -> { clearState(); refreshPool(true, true); }; this._zoneAffinity = config.getPropertyRepository().get(appName + ".EVCacheClientPool.zoneAffinity", Boolean.class).orElse(true); this._poolSize = config.getPropertyRepository().get(appName + ".EVCacheClientPool.poolSize", Integer.class).orElse(1); this._poolSize.subscribe(callback); this._readTimeout = config.getPropertyRepository().get(appName + ".EVCacheClientPool.readTimeout", Integer.class).orElse(manager.getDefaultReadTimeout().get()); this._readTimeout.subscribe(callback); this._bulkReadTimeout = config.getPropertyRepository().get(appName + ".EVCacheClientPool.bulkReadTimeout", Integer.class).orElse(_readTimeout.get()); this._bulkReadTimeout.subscribe(callback); this.refreshConnectionOnReadQueueFull = config.getPropertyRepository().get(appName + ".EVCacheClientPool.refresh.connection.on.readQueueFull", Boolean.class).orElseGet("EVCacheClientPool.refresh.connection.on.readQueueFull").orElse(false); this.refreshConnectionOnReadQueueFullSize = config.getPropertyRepository().get(appName + ".EVCacheClientPool.refresh.connection.on.readQueueFull.size", Integer.class).orElseGet("EVCacheClientPool.refresh.connection.on.readQueueFull.size").orElse(100); this._opQueueMaxBlockTime = config.getPropertyRepository().get(appName + ".operation.QueueMaxBlockTime", Integer.class).orElse(10); this._opQueueMaxBlockTime.subscribe(callback); this._operationTimeout = config.getPropertyRepository().get(appName + ".operation.timeout", Integer.class).orElseGet("evcache.operation.timeout").orElse(2500); this._operationTimeout.subscribe(callback); this._maxReadQueueSize = config.getPropertyRepository().get(appName + ".max.read.queue.length", Integer.class).orElse(50); this._retryAcrossAllReplicas = config.getPropertyRepository().get(_appName + ".retry.all.copies", Boolean.class).orElse(false); this._disableAsyncRefresh = 
config.getPropertyRepository().get(_appName + ".disable.async.refresh", Boolean.class).orElse(false); this._maxRetries = config.getPropertyRepository().get(_appName + ".max.retry.count", Integer.class).orElse(1); Function<String, Set<String>> splitSet = t -> Arrays.stream(t.split(",")).collect(Collectors.toSet()); this.logOperations = config.getPropertyRepository().get(appName + ".log.operation", Integer.class).orElse(0); this.logOperationCalls = config.getPropertyRepository().get(appName + ".log.operation.calls", String.class).orElse("SET,DELETE,GMISS,TMISS,BMISS_ALL,TOUCH,REPLACE").map(splitSet); this.reconcileInterval = config.getPropertyRepository().get(appName + ".reconcile.interval", Integer.class).orElse(600000); this.cloneWrite = config.getPropertyRepository().get(appName + ".clone.writes.to", String.class).map(splitSet).orElse(Collections.emptySet()); this.cloneWrite.subscribe(i -> { setupClones(); }); this.duet = config.getPropertyRepository().get(appName + ".duet", String.class).orElseGet("evcache.duet").orElse(""); this.duet.subscribe(i -> { setupDuet(); }); this.duetPrimary = config.getPropertyRepository().get(appName + ".duet.primary", Boolean.class).orElseGet("evcache.duet.primary").orElse(false); tagList = new ArrayList<Tag>(2); EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, _appName); this._pingServers = config.getPropertyRepository().get(appName + ".ping.servers", Boolean.class).orElseGet("evcache.ping.servers").orElse(false); setupMonitoring(); //init all callbacks refreshPool(false, true); setupDuet(); setupClones(); if (log.isInfoEnabled()) log.info(toString()); } private void setupClones() { for(String cloneApp : cloneWrite.get()) { manager.initEVCache(cloneApp); } } private void setupDuet() { // check if duet is already setup, if yes, remove the current duet. 
if (duetClientPool != null && !duetClientPool.getAppName().equalsIgnoreCase(duet.get())) { duetClientPool = null; log.info("Removed duet"); } if (null == duetClientPool && !duet.get().isEmpty()) { duetClientPool = manager.initEVCache(duet.get(), true); log.info("Completed setup of a duet with name: " + duet.get()); } } private void clearState() { cleanupMemcachedInstances(true); memcachedInstancesByServerGroup.clear(); memcachedReadInstancesByServerGroup.clear(); memcachedWriteInstancesByServerGroup.clear(); readServerGroupByZone.clear(); memcachedFallbackReadInstances = new ServerGroupCircularIterator(Collections.<ServerGroup> emptySet()); } private EVCacheClient getEVCacheClientForReadInternal() { if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) { if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup); if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true); return null; } try { List<EVCacheClient> clients = null; if (_zoneAffinity.get()) { if (localServerGroupIterator != null) { clients = memcachedReadInstancesByServerGroup.get(localServerGroupIterator.next()); } if (clients == null) { final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(); if (fallbackServerGroup == null) { if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null."); return null; } clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup); } } else { clients = new ArrayList<EVCacheClient>(memcachedReadInstancesByServerGroup.size() - 1); for (Iterator<ServerGroup> itr = memcachedReadInstancesByServerGroup.keySet().iterator(); itr .hasNext();) { final ServerGroup serverGroup = itr.next(); final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(serverGroup); final EVCacheClient client = selectClient(clientList); if (client != null) clients.add(client); } } return selectClient(clients); } catch (Throwable t) { 
log.error("Exception trying to get an readable EVCache Instances for zone {}", t); return null; } } /** * Returns EVCacheClient of this pool if available. Otherwise, will return EVCacheClient of the duet. * @return */ public EVCacheClient getEVCacheClientForRead() { EVCacheClient evCacheClient = getEVCacheClientForReadInternal(); // most common production scenario if (null == duetClientPool) { return evCacheClient; } // return duet if current client is not available or if duet is primary if (null == evCacheClient || duetPrimary.get()) { EVCacheClient duetClient = duetClientPool.getEVCacheClientForRead(); // if duetClient is not present, fallback to evCacheClient return null == duetClient ? evCacheClient : duetClient; } return evCacheClient; } private List<EVCacheClient> getAllEVCacheClientForReadInternal() { if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) { if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup); if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true); return Collections.<EVCacheClient> emptyList(); } try { List<EVCacheClient> clients = null; if (localServerGroupIterator != null) { clients = memcachedReadInstancesByServerGroup.get(localServerGroupIterator.next()); } if (clients == null) { final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(); if (fallbackServerGroup == null) { if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null."); return Collections.<EVCacheClient> emptyList(); } clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup); } return clients; } catch (Throwable t) { log.error("Exception trying to get readable EVCache Instances for zone ", t); return Collections.<EVCacheClient> emptyList(); } } public List<EVCacheClient> getAllEVCacheClientForRead() { List<EVCacheClient> evCacheClients = getAllEVCacheClientForReadInternal(); // most common production scenario if 
(null == duetClientPool) { return evCacheClients; } List<EVCacheClient> duetEVCacheClients = duetClientPool.getAllEVCacheClientForRead(); if (null == evCacheClients) return duetEVCacheClients; if (null == duetEVCacheClients) return evCacheClients; if (duetPrimary.get()) { List<EVCacheClient> clients = new ArrayList<>(duetEVCacheClients); clients.addAll(evCacheClients); return clients; } else { List<EVCacheClient> clients = new ArrayList<>(evCacheClients); clients.addAll(duetEVCacheClients); return clients; } } private EVCacheClient selectClient(List<EVCacheClient> clients) { if (clients == null || clients.isEmpty()) { if (log.isDebugEnabled()) log.debug("clients is null returning null and forcing pool refresh!!!"); if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true); return null; } if (clients.size() == 1) { return clients.get(0); // Frequently used scenario } final long currentVal = numberOfModOps.incrementAndGet(); // Get absolute value of current val to ensure correctness even at 9 quintillion+ requests // make sure to truncate after the mod. This allows up to 2^31 clients. 
final int index = Math.abs((int) (currentVal % clients.size())); return clients.get(index); } private EVCacheClient getEVCacheClientForReadExcludeInternal(ServerGroup rsetUsed) { if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) { if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup); if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true); return null; } try { ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(rsetUsed); if (fallbackServerGroup == null || fallbackServerGroup.equals(rsetUsed)) { return null; } final List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup); return selectClient(clients); } catch (Throwable t) { log.error("Exception trying to get an readable EVCache Instances for zone {}", rsetUsed, t); return null; } } public EVCacheClient getEVCacheClientForReadExclude(ServerGroup rsetUsed) { EVCacheClient evCacheClient = getEVCacheClientForReadExcludeInternal(rsetUsed); // most common production scenario if (null == duetClientPool) { return evCacheClient; } // return duet if current client is not available or if duet is primary if (null == evCacheClient || duetPrimary.get()) { EVCacheClient duetClient = duetClientPool.getEVCacheClientForReadExclude(rsetUsed); // if duetClient is not present, fallback to evCacheClient return null == duetClient ? 
evCacheClient : duetClient; } return evCacheClient; } private EVCacheClient getEVCacheClientInternal(ServerGroup serverGroup) { if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) { if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup); if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true); return null; } try { List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(serverGroup); if (clients == null) { final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(); if (fallbackServerGroup == null) { if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null."); return null; } clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup); } return selectClient(clients); } catch (Throwable t) { log.error("Exception trying to get an readable EVCache Instances for ServerGroup {}", serverGroup, t); return null; } } public EVCacheClient getEVCacheClient(ServerGroup serverGroup) { EVCacheClient evCacheClient = getEVCacheClientInternal(serverGroup); // most common production scenario if (null == duetClientPool) { return evCacheClient; } // return duet if current client is not available or if duet is primary if (null == evCacheClient || duetPrimary.get()) { EVCacheClient duetClient = duetClientPool.getEVCacheClient(serverGroup); // if duetClient is not present, fallback to evCacheClient return null == duetClient ? 
evCacheClient : duetClient; } return evCacheClient; } private List<EVCacheClient> getEVCacheClientsForReadExcludingInternal(ServerGroup serverGroupToExclude) { if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) { if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup); if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true); return Collections.<EVCacheClient> emptyList(); } try { if (_retryAcrossAllReplicas.get()) { List<EVCacheClient> clients = new ArrayList<EVCacheClient>(memcachedReadInstancesByServerGroup.size() - 1); for (Iterator<ServerGroup> itr = memcachedReadInstancesByServerGroup.keySet().iterator(); itr .hasNext();) { final ServerGroup serverGroup = itr.next(); if (serverGroup.equals(serverGroupToExclude)) continue; final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(serverGroup); final EVCacheClient client = selectClient(clientList); if (client != null) clients.add(client); } return clients; } else { if(_maxRetries.get() == 1) { final EVCacheClient client = getEVCacheClientForReadExclude(serverGroupToExclude); if (client != null) return Collections.singletonList(client); } else { int maxNumberOfPossibleRetries = memcachedReadInstancesByServerGroup.size() - 1; if(maxNumberOfPossibleRetries > _maxRetries.get()) { maxNumberOfPossibleRetries = _maxRetries.get(); } final List<EVCacheClient> clients = new ArrayList<EVCacheClient>(_maxRetries.get()); for(int i = 0; i < maxNumberOfPossibleRetries; i++) { ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(serverGroupToExclude); if (fallbackServerGroup == null ) { return clients; } final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(fallbackServerGroup); final EVCacheClient client = selectClient(clientList); if (client != null) clients.add(client); } return clients; } } } catch (Throwable t) { log.error("Exception trying 
to get an readable EVCache Instances for zone {}", serverGroupToExclude, t); } return Collections.<EVCacheClient> emptyList(); } public List<EVCacheClient> getEVCacheClientsForReadExcluding(ServerGroup serverGroupToExclude) { List<EVCacheClient> evCacheClients = getEVCacheClientsForReadExcludingInternal(serverGroupToExclude); // most common production scenario if (null == duetClientPool) { return evCacheClients; } List<EVCacheClient> duetEVCacheClients = duetClientPool.getEVCacheClientsForReadExcluding(serverGroupToExclude); if (null == evCacheClients) return duetEVCacheClients; if (null == duetEVCacheClients) return evCacheClients; if (duetPrimary.get()) { List<EVCacheClient> clients = new ArrayList<>(duetEVCacheClients); clients.addAll(evCacheClients); return clients; } else { List<EVCacheClient> clients = new ArrayList<>(evCacheClients); clients.addAll(duetEVCacheClients); return clients; } } public boolean isInWriteOnly(ServerGroup serverGroup) { if (memcachedReadInstancesByServerGroup.containsKey(serverGroup)) { return false; } if(memcachedWriteInstancesByServerGroup.containsKey(serverGroup)) { return true; } return false; } private EVCacheClient[] getWriteOnlyEVCacheClientsInternal() { try { if((cloneWrite.get().size() == 0)) { int size = memcachedWriteInstancesByServerGroup.size() - memcachedReadInstancesByServerGroup.size(); if (size == 0) return new EVCacheClient[0]; final EVCacheClient[] clientArr = new EVCacheClient[size]; for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) { if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup) && size > 0) { final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup); if (clients.size() == 1) { clientArr[--size] = clients.get(0); // frequently used use case } else { final long currentVal = numberOfModOps.incrementAndGet(); final int index = (int) (currentVal % clients.size()); clientArr[--size] = (index < 0) ? 
clients.get(0) : clients.get(index); } } } return clientArr; } else { final List<EVCacheClient> evcacheClientList = new ArrayList<EVCacheClient>(); for(String cloneApp : cloneWrite.get()) { final EVCacheClient[] clients = manager.getEVCacheClientPool(cloneApp).getWriteOnlyEVCacheClients(); if(clients == null || clients.length == 0) continue; for(int i = 0; i < clients.length; i++) { evcacheClientList.add(clients[i]); } } for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) { if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup)) { final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup); if (clients.size() == 1) { evcacheClientList.add(clients.get(0)); // frequently used use case } else { final long currentVal = numberOfModOps.incrementAndGet(); final int index = (int) (currentVal % clients.size()); evcacheClientList.add((index < 0) ? clients.get(0) : clients.get(index)); } } } return evcacheClientList.toArray(new EVCacheClient[0]); } } catch (Throwable t) { log.error("Exception trying to get an array of writable EVCache Instances", t); return new EVCacheClient[0]; } } public EVCacheClient[] getWriteOnlyEVCacheClients() { EVCacheClient[] evCacheClients = getWriteOnlyEVCacheClientsInternal(); // most common production scenario if (null == duetClientPool) { return evCacheClients; } EVCacheClient[] duetEVCacheClients = duetClientPool.getWriteOnlyEVCacheClients(); if (null == evCacheClients || evCacheClients.length == 0) { return duetEVCacheClients; } if (null == duetEVCacheClients || duetEVCacheClients.length == 0) { return evCacheClients; } if (duetPrimary.get()) { // return write-only of duet app and all writers of original app to which duet is attached // get all writers of original app evCacheClients = getEVCacheClientForWriteInternal(); EVCacheClient[] allEVCacheClients = Arrays.copyOf(duetEVCacheClients, duetEVCacheClients.length + evCacheClients.length); System.arraycopy(evCacheClients, 0, 
allEVCacheClients, duetEVCacheClients.length, evCacheClients.length); return allEVCacheClients; } else { // return write-only of original app and all writers of duet app // get all writers of duet app duetEVCacheClients = duetClientPool.getEVCacheClientForWrite(); EVCacheClient[] allEVCacheClients = Arrays.copyOf(evCacheClients, evCacheClients.length + duetEVCacheClients.length); System.arraycopy(duetEVCacheClients, 0, allEVCacheClients, evCacheClients.length, duetEVCacheClients.length); return allEVCacheClients; } } EVCacheClient[] getAllWriteClients() { try { if(allEVCacheWriteClients != null) { EVCacheClient[] clientArray = allEVCacheWriteClients.next(); if(clientArray == null || clientArray.length == 0 ) { if (log.isInfoEnabled()) log.info("Refreshing the write client array."); try { refreshLock.lock(); clientArray = allEVCacheWriteClients.next(); if(clientArray == null || clientArray.length == 0 ) { refreshPool(false, true); clientArray = allEVCacheWriteClients.next(); } } finally { refreshLock.unlock(); } } if (log.isDebugEnabled()) log.debug("clientArray : " + clientArray); if(clientArray == null ) return new EVCacheClient[0]; return clientArray; } final EVCacheClient[] clientArr = new EVCacheClient[memcachedWriteInstancesByServerGroup.size()]; int i = 0; for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) { final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup); if (clients.size() == 1) { clientArr[i++] = clients.get(0); // frequently used usecase } else { final long currentVal = numberOfModOps.incrementAndGet(); final int index = (int) (currentVal % clients.size()); clientArr[i++] = (index < 0) ? 
clients.get(0) : clients.get(index); } } if(clientArr == null ) return new EVCacheClient[0]; return clientArr; } catch (Throwable t) { log.error("Exception trying to get an array of writable EVCache Instances", t); return new EVCacheClient[0]; } } private EVCacheClient[] getEVCacheClientForWriteInternal() { try { if((cloneWrite.get().size() == 0)) { return getAllWriteClients(); } else { final List<EVCacheClient> evcacheClientList = new ArrayList<EVCacheClient>(); final EVCacheClient[] clientArr = getAllWriteClients(); for(EVCacheClient client : clientArr) { evcacheClientList.add(client); } for(String cloneApp : cloneWrite.get()) { final EVCacheClient[] cloneWriteArray = manager.getEVCacheClientPool(cloneApp).getAllWriteClients(); for(int j = 0; j < cloneWriteArray.length; j++) { evcacheClientList.add(cloneWriteArray[j]); } } return evcacheClientList.toArray(new EVCacheClient[0]); } } catch (Throwable t) { log.error("Exception trying to get an array of writable EVCache Instances", t); return new EVCacheClient[0]; } } public EVCacheClient[] getEVCacheClientForWrite() { EVCacheClient[] evCacheClients = getEVCacheClientForWriteInternal(); // most common production scenario if (null == duetClientPool) { return evCacheClients; } EVCacheClient[] duetEVCacheClients = duetClientPool.getEVCacheClientForWrite(); if (null == evCacheClients || evCacheClients.length == 0) { return duetEVCacheClients; } if (null == duetEVCacheClients || duetEVCacheClients.length == 0) { return evCacheClients; } if (duetPrimary.get()) { EVCacheClient[] allEVCacheClients = Arrays.copyOf(duetEVCacheClients, duetEVCacheClients.length + evCacheClients.length); System.arraycopy(evCacheClients, 0, allEVCacheClients, duetEVCacheClients.length, evCacheClients.length); return allEVCacheClients; } else { EVCacheClient[] allEVCacheClients = Arrays.copyOf(evCacheClients, evCacheClients.length + duetEVCacheClients.length); System.arraycopy(duetEVCacheClients, 0, allEVCacheClients, evCacheClients.length, 
duetEVCacheClients.length); return allEVCacheClients; } } private void refresh() throws IOException { refresh(false); } protected boolean haveInstancesInServerGroupChanged(ServerGroup serverGroup, Set<InetSocketAddress> discoveredHostsInServerGroup) { final List<EVCacheClient> clients = memcachedInstancesByServerGroup.get(serverGroup); // 1. if we have discovered instances in zone but not in our map then // return immediately if (clients == null) return true; // 2. Do a quick check based on count (active, inactive and discovered) for (int i = 0; i < clients.size(); i++) { final int size = clients.size(); final EVCacheClient client = clients.get(i); final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver(); final int activeServerCount = connectionObserver.getActiveServerCount(); final int inActiveServerCount = connectionObserver.getInActiveServerCount(); final int sizeInDiscovery = discoveredHostsInServerGroup.size(); final int sizeInHashing = client.getNodeLocator().getAll().size(); if (i == 0) getConfigGauge("sizeInDiscovery", serverGroup).set(Long.valueOf(sizeInDiscovery)); if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + "\n\tActive Count : " + activeServerCount + "\n\tInactive Count : " + inActiveServerCount + "\n\tDiscovery Count : " + sizeInDiscovery + "\n\tsizeInHashing : " + sizeInHashing); if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + "\n\tActive Count : " + activeServerCount + "\n\tInactive Count : " + inActiveServerCount + "\n\tDiscovery Count : " + sizeInDiscovery + "\n\tsizeInHashing : " + sizeInHashing); final long currentTime = System.currentTimeMillis(); boolean reconcile = false; if (currentTime - lastReconcileTime > reconcileInterval.get()) { reconcile = true; lastReconcileTime = currentTime; getConfigGauge(EVCacheMetricsFactory.POOL_RECONCILE, serverGroup).set(Long.valueOf(1)); } else { 
getConfigGauge(EVCacheMetricsFactory.POOL_RECONCILE, serverGroup).set(Long.valueOf(0)); } final boolean hashingSizeDiff = (sizeInHashing != sizeInDiscovery && sizeInHashing != activeServerCount); if (reconcile || activeServerCount != sizeInDiscovery || inActiveServerCount > 0 || hashingSizeDiff) { if (log.isDebugEnabled()) log.debug("\n\t" + _appName + " & " + serverGroup + " experienced an issue.\n\tActive Server Count : " + activeServerCount); if (log.isDebugEnabled()) log.debug("\n\tInActive Server Count : " + inActiveServerCount + "\n\tDiscovered Instances : " + sizeInDiscovery); // 1. If a host is in discovery and we don't have an active or // inActive connection to it then we will have to refresh our // list. Typical case is we have replaced an existing node or // expanded the cluster. for (InetSocketAddress instance : discoveredHostsInServerGroup) { if (!connectionObserver.getActiveServers().containsKey(instance) && !connectionObserver.getInActiveServers().containsKey(instance)) { if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; instance : " + instance + " not found and will shutdown the client and init it again."); getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(1)); return true; } } // 2. If a host is not in discovery and is // inActive for more than 15 mins then we will have to refresh our // list. Typical case is we have replaced an existing node or // decreasing the cluster. Replacing an instance should not take // more than 20 mins (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html#types-of-instance-status-checks). // Even if it does then we will refresh the client twice which // should be ok. 
// NOTE : For a zombie instance this will mean that it will take // 15 mins after detaching and taking it OOS to be removed // unless we force a refresh // 12/5/2015 - Should we even do this anymore for (Entry<InetSocketAddress, Long> entry : connectionObserver.getInActiveServers().entrySet()) { if ((currentTime - entry.getValue().longValue()) > 1200000 && !discoveredHostsInServerGroup.contains(entry.getKey())) { if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; instance : " + entry.getKey() + " not found in discovery and will shutdown the client and init it again."); getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(2)); return true; } } // 3. Check to see if there are any inactive connections. If we // find inactive connections and this node is not in discovery // then we will refresh the client. final Collection<MemcachedNode> allNodes = client.getNodeLocator().getAll(); for (MemcachedNode node : allNodes) { if (node instanceof EVCacheNode) { final EVCacheNode evcNode = ((EVCacheNode) node); // If the connection to a node is not active then we // will reconnect the client. if (!evcNode.isActive() && !discoveredHostsInServerGroup.contains(evcNode.getSocketAddress())) { if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; Node : " + node + " is not active. Will shutdown the client and init it again."); getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(3)); return true; } } } // 4. 
if there is a difference in the number of nodes in the // KetamaHashingMap then refresh if (hashingSizeDiff) { if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; PoolSize : " + size + "; ActiveConnections : " + activeServerCount + "; InactiveConnections : " + inActiveServerCount + "; InDiscovery : " + sizeInDiscovery + "; InHashing : " + sizeInHashing + "; hashingSizeDiff : " + hashingSizeDiff + ". Since there is a diff in hashing size will shutdown the client and init it again."); getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(4)); return true; } // 5. If a host is in not discovery and we have an active connection to it for more than 20 mins then we will refresh // Typical case is we have replaced an existing node but it has zombie. We are able to connect to it (hypervisor) but not talk to it // or prana has shutdown successfully but not memcached. In such scenario we will refresh the cluster for(InetSocketAddress instance : connectionObserver.getActiveServers().keySet()) { if(!discoveredHostsInServerGroup.contains(instance)) { if(!evCacheDiscoveryConnectionLostSet.containsKey(instance)) { evCacheDiscoveryConnectionLostSet.put(instance, Long.valueOf(currentTime)); if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; instance : " + instance + " not found in discovery. 
We will add to our list and monitor it."); } else { long lostDur = (currentTime - evCacheDiscoveryConnectionLostSet.get(instance).longValue()); if (lostDur >= 1200000) { if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; instance : " + instance + " not found in discovery for the past 20 mins and will shutdown the client and init it again."); getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(5)); evCacheDiscoveryConnectionLostSet.remove(instance); return true; } else { if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; instance : " + instance + " not found in discovery for " + lostDur + " msec."); } } } } // 9. If we have removed all instances or took them OOS in a // ServerGroup then shutdown the client if (sizeInDiscovery == 0) { if (activeServerCount == 0 || inActiveServerCount > activeServerCount) { if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; Will shutdown the client since there are no active servers and no servers for this ServerGroup in disocvery."); getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(9)); return true; } } } getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(0)); } reportPoolConifg(); return false; } private List<InetSocketAddress> getMemcachedSocketAddressList(final Set<InetSocketAddress> discoveredHostsInZone) { final List<InetSocketAddress> memcachedNodesInZone = new ArrayList<InetSocketAddress>(); for (InetSocketAddress hostAddress : discoveredHostsInZone) { memcachedNodesInZone.add(hostAddress); } return memcachedNodesInZone; } private void shutdownClientsInZone(List<EVCacheClient> clients) { if (clients == null || clients.isEmpty()) return; // Shutdown the old clients in 60 seconds, this will give ample time to // cleanup anything pending in its queue for (EVCacheClient oldClient : clients) { try { 
                final boolean obsRemoved = oldClient.removeConnectionObserver();
                if (log.isDebugEnabled()) log.debug("Connection observer removed " + obsRemoved);
                // Allow a long (60s) drain so queued operations are not lost.
                final boolean status = oldClient.shutdown(60, TimeUnit.SECONDS);
                if (log.isDebugEnabled()) log.debug("Shutting down -> Client {" + oldClient.toString() + "}; status : " + status);
            } catch (Exception ex) {
                log.error("Exception while shutting down the old Client", ex);
            }
        }
    }

    /**
     * Installs {@code newClients} as the live client list for {@code serverGroup} in the
     * instance/read/write maps, then shuts down whatever clients were previously registered.
     * A group currently flagged write-only is excluded from the read map.
     *
     * @param serverGroup the server group being (re)initialized
     * @param newClients freshly created clients that replace the old ones
     */
    private void setupNewClientsByServerGroup(ServerGroup serverGroup, List<EVCacheClient> newClients) {
        final List<EVCacheClient> currentClients = memcachedInstancesByServerGroup.put(serverGroup, newClients);
        // if the zone is in write only mode then remove it from the Map
        final Property<Boolean> isZoneInWriteOnlyMode = writeOnlyFastPropertyMap.get(serverGroup);
        if (isZoneInWriteOnlyMode.get().booleanValue()) {
            memcachedReadInstancesByServerGroup.remove(serverGroup);
        } else {
            memcachedReadInstancesByServerGroup.put(serverGroup, newClients);
        }
        memcachedWriteInstancesByServerGroup.put(serverGroup, newClients);
        // Rebuild the pre-computed array of write clients now that membership changed.
        setupAllEVCacheWriteClientsArray();
        if (currentClients == null || currentClients.isEmpty()) return;
        // Now since we have replace the old instances shutdown all the old
        // clients
        if (log.isDebugEnabled()) log.debug("Replaced an existing Pool for ServerGroup : " + serverGroup + "; and app " + _appName
                + " ;\n\tOldClients : " + currentClients + ";\n\tNewClients : " + newClients);
        for (EVCacheClient client : currentClients) {
            if (!client.isShutdown()) {
                if (log.isDebugEnabled()) log.debug("Shutting down in Fallback -> AppName : " + _appName + "; ServerGroup : " + serverGroup + "; client {" + client + "};");
                try {
                    if (client.getConnectionObserver() != null) {
                        final boolean obsRemoved = client.removeConnectionObserver();
                        if (log.isDebugEnabled()) log.debug("Connection observer removed " + obsRemoved);
                    }
                    // Shorter (5s) drain here; the paranoid second pass below allows 60s.
                    final boolean status = client.shutdown(5, TimeUnit.SECONDS);
                    if (log.isDebugEnabled()) log.debug("Shutting down {" + client + "} ; status : " + status);
                }
                catch (Exception ex) {
                    log.error("Exception while shutting down the old Client", ex);
                }
            }
        }
        // Paranoid Here. Even though we have shutdown the old clients do it
        // again as we noticed issues while shutting down MemcachedNodes
        shutdownClientsInZone(currentClients);
    }

    // Check if a zone has been moved to Write only. If so, remove the app from
    // the read map.
    // Similarly if the app has been moved to Read+Write from write only add it
    // back to the read map.
    private void updateMemcachedReadInstancesByZone() {
        for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
            final Property<Boolean> isZoneInWriteOnlyMode = writeOnlyFastPropertyMap.get(serverGroup);
            if (isZoneInWriteOnlyMode.get().booleanValue()) {
                if (memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
                    memcachedReadInstancesByServerGroup.remove(serverGroup);
                }
            } else {
                if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
                    memcachedReadInstancesByServerGroup.put(serverGroup, memcachedInstancesByServerGroup.get(serverGroup));
                }
            }
            // if we lose over 50% of instances put that zone in writeonly mode.
            final List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(serverGroup);
            if (clients != null && !clients.isEmpty()) {
                // Inspect the first client only; all pooled clients share the same node set.
                final EVCacheClient client = clients.get(0);
                if (client != null) {
                    final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
                    if (connectionObserver != null) {
                        final int activeServerCount = connectionObserver.getActiveServerCount();
                        final int inActiveServerCount = connectionObserver.getInActiveServerCount();
                        // More inactive than active servers -> drop the group from reads
                        // (gauge value 1 = removed from reads, 2 = serving reads, 0 = write-only).
                        if (inActiveServerCount > activeServerCount) {
                            memcachedReadInstancesByServerGroup.remove(serverGroup);
                            getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(1));
                        } else {
                            getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(2));
                        }
                    }
                }
            } else {
                final List<EVCacheClient> clientsWrite = memcachedInstancesByServerGroup.get(serverGroup);
                if (clientsWrite != null && !clientsWrite.isEmpty()) {
                    getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(0));
                }
            }
        }
        // If the set of readable groups changed, rebuild the fallback iterator and the
        // per-zone read iterators used for zone-affine reads.
        if (memcachedReadInstancesByServerGroup.size() != memcachedFallbackReadInstances.getSize()) {
            memcachedFallbackReadInstances = new ServerGroupCircularIterator(memcachedReadInstancesByServerGroup.keySet());
            Map<String, Set<ServerGroup>> readServerGroupByZoneMap = new ConcurrentHashMap<String, Set<ServerGroup>>();
            for (ServerGroup serverGroup : memcachedReadInstancesByServerGroup.keySet()) {
                Set<ServerGroup> serverGroupList = readServerGroupByZoneMap.get(serverGroup.getZone());
                if (serverGroupList == null) {
                    serverGroupList = new HashSet<ServerGroup>();
                    readServerGroupByZoneMap.put(serverGroup.getZone(), serverGroupList);
                }
                serverGroupList.add(serverGroup);
            }
            Map<String, ServerGroupCircularIterator> _readServerGroupByZone = new ConcurrentHashMap<String, ServerGroupCircularIterator>();
            for (Entry<String, Set<ServerGroup>> readServerGroupByZoneEntry : readServerGroupByZoneMap.entrySet()) {
                _readServerGroupByZone.put(readServerGroupByZoneEntry.getKey(), new ServerGroupCircularIterator(readServerGroupByZoneEntry.getValue()));
            }
            // Swap in the fully built map, then re-resolve this instance's local-zone iterator.
            this.readServerGroupByZone = _readServerGroupByZone;
            localServerGroupIterator = readServerGroupByZone.get(_zone);
        }
    }

    /**
     * Removes server groups whose clients have no active connections (or all of them, when
     * {@code force} is true) from every map, shutting the clients down immediately.
     *
     * @param force when true, every group is removed regardless of connection state
     */
    private void cleanupMemcachedInstances(boolean force) {
        pingServers();
        for (Iterator<Entry<ServerGroup, List<EVCacheClient>>> it = memcachedInstancesByServerGroup.entrySet().iterator(); it.hasNext();) {
            final Entry<ServerGroup, List<EVCacheClient>> serverGroupEntry = it.next();
            final List<EVCacheClient> instancesInAServerGroup = serverGroupEntry.getValue();
            boolean removeEntry = false;
            for (EVCacheClient client : instancesInAServerGroup) {
                final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
                // Only remove when there are zero active servers but some inactive ones remain.
                if (connectionObserver.getActiveServerCount() == 0 && connectionObserver.getInActiveServerCount() > 0) {
                    removeEntry = true;
                }
            }
            if (force || removeEntry) {
                final ServerGroup serverGroup = serverGroupEntry.getKey();
                memcachedReadInstancesByServerGroup.remove(serverGroup);
                memcachedWriteInstancesByServerGroup.remove(serverGroup);
                for (EVCacheClient client : instancesInAServerGroup) {
                    if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + " has no active servers. Cleaning up this ServerGroup.");
                    // Immediate shutdown (0s) — there are no active servers to drain to.
                    client.shutdown(0, TimeUnit.SECONDS);
                    client.getConnectionObserver().shutdown();
                }
                it.remove();
                // Invalidate the pre-computed write-client array; it is rebuilt lazily/elsewhere.
                allEVCacheWriteClients = null;
            }
        }
    }

    /**
     * Core refresh: re-discovers instances for this app and rebuilds clients for any server
     * group whose membership changed (or for all groups when {@code force} is set).
     * Synchronized so that scheduled and async-triggered refreshes cannot interleave.
     *
     * @param force rebuild every server group regardless of detected changes
     * @throws IOException declared for discovery/client construction failures
     */
    private synchronized void refresh(boolean force) throws IOException {
        final long start = System.currentTimeMillis();
        if (log.isDebugEnabled()) log.debug("refresh APP : " + _appName + "; force : " + force);
        try {
            final Map<ServerGroup, EVCacheServerGroupConfig> instances = provider.discoverInstances(_appName);
            if (log.isDebugEnabled()) log.debug("instances : " + instances);
            // if no instances are found check to see if a clean up is needed
            // and bail immediately.
            if (instances == null || instances.isEmpty()) {
                if (!memcachedInstancesByServerGroup.isEmpty()) cleanupMemcachedInstances(false);
                return;
            }
            // Disable any group we currently hold that discovery no longer reports.
            for(ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
                if(!instances.containsKey(serverGroup)) {
                    if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
                            + " does not exist or is not enabled or is out of service. We will shutdown this client and remove it.");
                    serverGroupDisabled(serverGroup);
                }
            }
            boolean updateAllEVCacheWriteClients = false;
            for (Entry<ServerGroup, EVCacheServerGroupConfig> serverGroupEntry : instances.entrySet()) {
                final ServerGroup serverGroup = serverGroupEntry.getKey();
                final EVCacheServerGroupConfig config = serverGroupEntry.getValue();
                final Set<InetSocketAddress> discoverdInstanceInServerGroup = config.getInetSocketAddress();
                final String zone = serverGroup.getZone();
                // Normalize a null host set to an empty set so the checks below are uniform.
                final Set<InetSocketAddress> discoveredHostsInServerGroup = (discoverdInstanceInServerGroup == null)
                        ? Collections.<InetSocketAddress> emptySet() : discoverdInstanceInServerGroup;
                if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + "\n\tSize : " + discoveredHostsInServerGroup.size()
                        + "\n\tInstances in ServerGroup : " + discoveredHostsInServerGroup);
                if (discoveredHostsInServerGroup.size() == 0 && memcachedInstancesByServerGroup.containsKey(serverGroup)) {
                    if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + " has no active servers. Cleaning up this ServerGroup.");
                    serverGroupDisabled(serverGroup);
                    continue;
                }
                boolean instanceChangeInServerGroup = force;
                if (instanceChangeInServerGroup) {
                    if (log.isWarnEnabled()) log.warn("FORCE REFRESH :: AppName :" + _appName + "; ServerGroup : " + serverGroup + "; Changed : " + instanceChangeInServerGroup);
                } else {
                    instanceChangeInServerGroup = haveInstancesInServerGroupChanged(serverGroup, discoveredHostsInServerGroup);
                    if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
                            + "\n\tinstanceChangeInServerGroup : " + instanceChangeInServerGroup);
                    if (!instanceChangeInServerGroup) {
                        // quick exit as everything looks fine. No new instances
                        // found and were inactive
                        if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; Changed : " + instanceChangeInServerGroup);
                        continue;
                    }
                }
                // Let us create a list of SocketAddress from the discovered
                // instances in zone
                final List<InetSocketAddress> memcachedSAInServerGroup = getMemcachedSocketAddressList(discoveredHostsInServerGroup);
                if (memcachedSAInServerGroup.size() > 0) {
                    // now since there is a change with the instances in the
                    // zone. let us go ahead and create a new EVCacheClient with
                    // the new settings
                    final int poolSize = _poolSize.get();
                    final List<EVCacheClient> newClients = new ArrayList<EVCacheClient>(poolSize);
                    for (int i = 0; i < poolSize; i++) {
                        final int maxQueueSize = EVCacheConfig.getInstance().getPropertyRepository().get(_appName + ".max.queue.length", Integer.class).orElse(16384).get();
                        EVCacheClient client;
                        try {
                            client = new EVCacheClient(_appName, zone, i, config, memcachedSAInServerGroup, maxQueueSize, _maxReadQueueSize, _readTimeout,
                                    _bulkReadTimeout, _opQueueMaxBlockTime, _operationTimeout, this, isDuet);
                            newClients.add(client);
                            final int id = client.getId();
                            if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; intit : client.getId() : " + id);
                            lastReconcileTime = System.currentTimeMillis();
                        } catch (Exception e) {
                            // Creation failures are counted and logged; the loop continues so a
                            // partial pool can still be installed below.
                            incrementFailure(EVCacheMetricsFactory.INTERNAL_POOL_INIT_ERROR, config.getServerGroup());
                            log.error("Unable to create EVCacheClient for app - " + _appName + " and Server Group - " + serverGroup.getName(), e);
                        }
                    }
                    if (newClients.size() > 0) {
                        setupNewClientsByServerGroup(serverGroup, newClients);
                        updateAllEVCacheWriteClients = true;
                    }
                }
            }
            if(updateAllEVCacheWriteClients) {
                setupAllEVCacheWriteClientsArray();
            }
            // Check to see if a zone has been removed, if so remove them from
            // the active list
            if (memcachedInstancesByServerGroup.size() > instances.size()) {
                if (log.isDebugEnabled()) log.debug("\n\tAppName :" + _appName + ";\n\tServerGroup Discovered : " + instances.keySet()
                        + ";\n\tCurrent ServerGroup in EVCache Client : " + memcachedInstancesByServerGroup.keySet());
                cleanupMemcachedInstances(false);
            }
            updateMemcachedReadInstancesByZone();
            updateQueueStats();
            if (_pingServers.get()) pingServers();
        } catch (Throwable t) {
            log.error("Exception while refreshing the Server list", t);
        } finally {
            // Record the total refresh duration regardless of success/failure.
            EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_POOL_REFRESH, tagList,
                    Duration.ofMillis(100)).record(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
        }
        if (log.isDebugEnabled()) log.debug("refresh APP : " + _appName + "; DONE");
    }

    /**
     * Rebuilds {@code allEVCacheWriteClients}: for each pool index, one array holding that
     * index's client from every writable server group, wrapped in a circular iterator.
     * Falls back to a group's client 0 when the group has fewer clients than the pool size.
     */
    private void setupAllEVCacheWriteClientsArray() {
        final List<EVCacheClient[]> newClients = new ArrayList<EVCacheClient[]>(_poolSize.get());
        try {
            final int serverGroupSize = memcachedWriteInstancesByServerGroup.size();
            for(int ind = 0; ind < _poolSize.get(); ind++) {
                final EVCacheClient[] clientArr = new EVCacheClient[serverGroupSize];
                int i = 0;
                for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
                    final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
                    if(clients.size() > ind) {
                        clientArr[i++] = clients.get(ind); // frequently used usecase
                    } else {
                        log.warn("Incorrect pool size detected for AppName : " + _appName + "; PoolSize " + _poolSize.get() + "; serverGroup : " + serverGroup
                                + "; ind : " + ind + "; i : " + i);
                        if(clients.size() > 0) {
                            clientArr[i++] = clients.get(0);
                        }
                    }
                }
                newClients.add(clientArr);
            }
            this.allEVCacheWriteClients = new CircularIterator<EVCacheClient[]>(newClients);
        } catch (Throwable t) {
            log.error("Exception trying to create an array of writable EVCache Instances for App : " + _appName, t);
        }
    }

    /**
     * Publishes read/write queue-length gauges for every client and, when enabled, reconnects
     * any node whose read queue has grown past the configured threshold.
     */
    private void updateQueueStats() {
        for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
            List<EVCacheClient> clients = memcachedInstancesByServerGroup.get(serverGroup);
            for(EVCacheClient client : clients) {
                getStatsGauge(EVCacheMetricsFactory.POOL_WRITE_Q_SIZE, client).set(Long.valueOf(client.getWriteQueueLength()));
                getStatsGauge(EVCacheMetricsFactory.POOL_READ_Q_SIZE, client).set(Long.valueOf(client.getReadQueueLength()));
                if(refreshConnectionOnReadQueueFull.get()) {
                    final Collection<MemcachedNode> allNodes = client.getNodeLocator().getAll();
                    for (MemcachedNode node : allNodes) {
                        if (node instanceof EVCacheNode) {
                            final EVCacheNode evcNode = ((EVCacheNode) node);
                            if(evcNode.getReadQueueSize() >=
                                    refreshConnectionOnReadQueueFullSize.get().intValue()) {
                                // Read queue saturated: count it and force this node to reconnect.
                                EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.POOL_REFRESH_QUEUE_FULL, evcNode.getTags()).increment();
                                client.getEVCacheMemcachedClient().reconnectNode(evcNode);
                            }
                        }
                    }
                }
            }
        }
    }

    /**
     * Issues a version request to every server of every client (a lightweight liveness probe),
     * logging the responses at debug level. Delegates to the duet pool when one is configured.
     */
    public void pingServers() {
        try {
            final Map<ServerGroup, List<EVCacheClient>> allServers = getAllInstancesByZone();
            for (Entry<ServerGroup, List<EVCacheClient>> entry : allServers.entrySet()) {
                final List<EVCacheClient> listOfClients = entry.getValue();
                for (EVCacheClient client : listOfClients) {
                    final Map<SocketAddress, String> versions = client.getVersions();
                    for (Entry<SocketAddress, String> vEntry : versions.entrySet()) {
                        if (log.isDebugEnabled()) log.debug("Host : " + vEntry.getKey() + " : " + vEntry.getValue());
                    }
                }
            }
            if (duetClientPool != null) duetClientPool.pingServers();
        } catch (Throwable t) {
            log.error("Error while pinging the servers", t);
        }
    }

    /**
     * Removes {@code serverGroup} from all maps and shuts down its clients (30s drain each).
     * Safe to call for groups that are not present. Also propagated to the duet pool.
     */
    public void serverGroupDisabled(final ServerGroup serverGroup) {
        if (memcachedInstancesByServerGroup.containsKey(serverGroup)) {
            if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + " has no active servers. Cleaning up this ServerGroup.");
            final List<EVCacheClient> clients = memcachedInstancesByServerGroup.remove(serverGroup);
            memcachedReadInstancesByServerGroup.remove(serverGroup);
            memcachedWriteInstancesByServerGroup.remove(serverGroup);
            setupAllEVCacheWriteClientsArray();
            for (EVCacheClient client : clients) {
                if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + "\n\tClient : " + client + " will be shutdown in 30 seconds.");
                client.shutdown(30, TimeUnit.SECONDS);
                client.getConnectionObserver().shutdown();
            }
        }
        if (duetClientPool != null) duetClientPool.serverGroupDisabled(serverGroup);
    }

    /**
     * Schedules an asynchronous pool refresh because {@code node} appears unavailable.
     * The refresh is forced when the last reconcile is older than the default refresh
     * interval, or when the node is inactive. Honors the disable-async-refresh property.
     */
    public void refreshAsync(MemcachedNode node) {
        if (log.isInfoEnabled()) log.info("Pool is being refresh as the EVCacheNode is not available. " + node.toString());
        if(!_disableAsyncRefresh.get()) {
            if (node instanceof EVCacheNode) {
                final EVCacheNode evcNode = ((EVCacheNode) node);
                EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.POOL_REFRESH_ASYNC, evcNode.getTags()).increment();
            }
            // manager.getDefaultRefreshInterval() is in seconds; compare in milliseconds.
            boolean force = (System.currentTimeMillis() - lastReconcileTime) > ( manager.getDefaultRefreshInterval().get() * 1000 ) ? true : false;
            if(!force) force = !node.isActive();
            refreshPool(true, force);
        }
        if (duetClientPool != null) duetClientPool.refreshAsync(node);
    }

    /** Scheduled entry point (Runnable): runs a non-forced refresh, swallowing all errors. */
    public void run() {
        try {
            refresh();
        } catch (Throwable t) {
            if (log.isDebugEnabled()) log.debug("Error Refreshing EVCache Instance list for " + _appName, t);
        }
    }

    /**
     * Shuts this pool down: marks it shut down, disables every server group (which closes
     * their clients), then re-runs monitoring setup so the MBean is unregistered.
     */
    void shutdown() {
        if (log.isDebugEnabled()) log.debug("EVCacheClientPool for App : " + _appName + " and Zone : " + _zone + " is being shutdown.");
        _shutdown = true;
        for(ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
            if (log.isDebugEnabled()) log.debug("\nSHUTDOWN\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup);
            serverGroupDisabled(serverGroup);
        }
        setupMonitoring();
    }

    /**
     * Returns (creating and caching on first use) the config gauge for {@code metric},
     * optionally scoped to a server group.
     */
    private Gauge getConfigGauge(String metric, ServerGroup serverGroup) {
        final String name = (serverGroup == null ?
metric : metric + serverGroup.getName() + isInWriteOnly(serverGroup)); Gauge gauge = gaugeMap.get(name ); if(gauge != null) return gauge; final List<Tag> tags = new ArrayList<Tag>(5); EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName); tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, metric)); if(serverGroup != null) { tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName())); } final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.INTERNAL_POOL_SG_CONFIG, tags); gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id); gaugeMap.put(name, gauge); return gauge; } private Gauge getStatsGauge(String metric, EVCacheClient client) { final String name = metric + client.getServerGroupName(); Gauge gauge = gaugeMap.get(name ); if(gauge != null) return gauge; final List<Tag> tags = new ArrayList<Tag>(4); EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName); tags.add(new BasicTag(EVCacheMetricsFactory.STAT_NAME, metric)); tags.add(new BasicTag(EVCacheMetricsFactory.CONNECTION_ID, String.valueOf(client.getId()))); tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, client.getServerGroupName())); final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.INTERNAL_STATS, tags); gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id); gaugeMap.put(name, gauge); return gauge; } private void incrementFailure(String metric, ServerGroup serverGroup) { final List<Tag> tags = new ArrayList<Tag>(4); EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName); tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, metric)); tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName())); EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.INTERNAL_POOL_INIT_ERROR, tags); } private void reportPoolConifg() { final int size = getPoolSize(); for(ServerGroup key : memcachedInstancesByServerGroup.keySet()) { 
getConfigGauge("poolSize", key).set(memcachedInstancesByServerGroup.get(key).size()); final EVCacheClient client = memcachedInstancesByServerGroup.get(key).get(0); if(client != null) { getConfigGauge("readTimeout", key).set(getReadTimeout().get()); getConfigGauge("bulkReadTimeout", key).set(getBulkReadTimeout().get()); getConfigGauge("numberOfServerGoups", key).set(memcachedInstancesByServerGroup.size()); getConfigGauge("maxReadQueueLength", key).set(_maxReadQueueSize.get()); getConfigGauge("instanceCount", key).set(client.getMemcachedNodesInZone().size());; final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver(); if(connectionObserver != null) { final int activeServerCount = connectionObserver.getActiveServerCount(); final int inActiveServerCount = connectionObserver.getInActiveServerCount(); final int sizeInHashing = client.getNodeLocator().getAll().size(); getConfigGauge("activeServerCount", key).set(Long.valueOf(activeServerCount)); getConfigGauge("activeConnectionCount", key).set(Long.valueOf(activeServerCount * size)); getConfigGauge("inActiveServerCount", key).set(Long.valueOf(inActiveServerCount)); getConfigGauge("sizeInHashing", key).set(Long.valueOf(sizeInHashing)); } final List<EVCacheClient> readClients = memcachedReadInstancesByServerGroup.get(key); if (readClients != null && readClients.size() > 0) { getConfigGauge(EVCacheMetricsFactory.POOL_READ_INSTANCES, key).set(Long.valueOf(readClients.get(0).getConnectionObserver().getActiveServerCount())); } final List<EVCacheClient> writeClients = memcachedWriteInstancesByServerGroup.get(key); if (writeClients != null && writeClients.size() > 0) { getConfigGauge(EVCacheMetricsFactory.POOL_WRITE_INSTANCES, key).set(Long.valueOf(writeClients.get(0).getConnectionObserver().getActiveServerCount())); } } } } private void setupMonitoring() { try { final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=" + _appName + ",SubGroup=pool"); final MBeanServer mbeanServer 
                = ManagementFactory.getPlatformMBeanServer();
            if (mbeanServer.isRegistered(mBeanName)) {
                if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
                mbeanServer.unregisterMBean(mBeanName);
            }
            // Skip re-registration when the pool has been shut down.
            if (!_shutdown) {
                mbeanServer.registerMBean(this, mBeanName);
            }
        } catch (Exception e) {
            if (log.isDebugEnabled()) log.debug("Exception", e);
        }
    }

    /**
     * Total number of active servers across all server groups (duet pool included).
     * Counts via each group's first client, since pooled clients share the same node set.
     */
    public int getInstanceCount() {
        int instances = 0;
        for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
            instances += memcachedInstancesByServerGroup.get(serverGroup).get(0).getConnectionObserver().getActiveServerCount();
        }
        if (duetClientPool != null) instances += duetClientPool.getInstanceCount();
        return instances;
    }

    /** Server-group name -> string rendering of its client list (duet pool included). */
    public Map<String, String> getInstancesByZone() {
        Map<String, String> instanceMap = new HashMap<String, String>();
        for (ServerGroup zone : memcachedInstancesByServerGroup.keySet()) {
            final List<EVCacheClient> instanceList = memcachedInstancesByServerGroup.get(zone);
            instanceMap.put(zone.toString(), instanceList.toString());
        }
        if (duetClientPool != null) instanceMap.putAll(duetClientPool.getInstancesByZone());
        return instanceMap;
    }

    /** Server-group name -> active server count (duet pool included). */
    public Map<String, Integer> getInstanceCountByZone() {
        final Map<String, Integer> instancesByZone = new HashMap<String, Integer>(memcachedInstancesByServerGroup.size() * 2);
        for (ServerGroup zone : memcachedInstancesByServerGroup.keySet()) {
            instancesByZone.put(zone.getName(), Integer.valueOf(memcachedInstancesByServerGroup.get(zone).get(0).getConnectionObserver().getActiveServerCount()));
        }
        if (duetClientPool != null) instancesByZone.putAll(duetClientPool.getInstanceCountByZone());
        return instancesByZone;
    }

    /** Readable server-group name -> string rendering of its client list (duet pool included). */
    public Map<String, String> getReadZones() {
        final Map<String, String> instanceMap = new HashMap<String, String>();
        for (ServerGroup key : memcachedReadInstancesByServerGroup.keySet()) {
            instanceMap.put(key.getName(), memcachedReadInstancesByServerGroup.get(key).toString());
        }
        if (duetClientPool != null) instanceMap.putAll(duetClientPool.getReadZones());
        return instanceMap;
    }

    /** Readable server-group name -> active server count (duet pool included). */
    public Map<String, Integer> getReadInstanceCountByZone() {
        final Map<String, Integer> instanceMap = new HashMap<String, Integer>();
        for (ServerGroup key : memcachedReadInstancesByServerGroup.keySet()) {
            instanceMap.put(key.getName(), Integer.valueOf(memcachedReadInstancesByServerGroup.get(key).get(0)
                    .getConnectionObserver().getActiveServerCount()));
        }
        if (duetClientPool != null) instanceMap.putAll(duetClientPool.getReadInstanceCountByZone());
        return instanceMap;
    }

    /** Writable server-group name -> string rendering of its client list (duet pool included). */
    public Map<String, String> getWriteZones() {
        final Map<String, String> instanceMap = new HashMap<String, String>();
        for (ServerGroup key : memcachedWriteInstancesByServerGroup.keySet()) {
            instanceMap.put(key.toString(), memcachedWriteInstancesByServerGroup.get(key).toString());
        }
        if (duetClientPool != null) instanceMap.putAll(duetClientPool.getWriteZones());
        return instanceMap;
    }

    /** Unmodifiable view of this pool's own (non-duet) server-group map. */
    private Map<ServerGroup, List<EVCacheClient>> getAllInstancesByZoneInternal() {
        return Collections.unmodifiableMap(memcachedInstancesByServerGroup);
    }

    /** All server groups and their clients; merged with the duet pool's map when present. */
    public Map<ServerGroup, List<EVCacheClient>> getAllInstancesByZone() {
        if (duetClientPool != null) {
            Map<ServerGroup, List<EVCacheClient>> allInstanceMap = new ConcurrentHashMap<>();
            allInstanceMap.putAll(getAllInstancesByZoneInternal());
            allInstanceMap.putAll(duetClientPool.getAllInstancesByZone());
            return Collections.unmodifiableMap(allInstanceMap);
        }
        return getAllInstancesByZoneInternal();
    }

    // Package-private: returns the live (mutable) map — callers must not modify it.
    Map<ServerGroup, List<EVCacheClient>> getAllInstancesByServerGroupInternal() {
        return memcachedInstancesByServerGroup;
    }

    /** All server groups and their clients by server group; merged with the duet pool's. */
    public Map<ServerGroup, List<EVCacheClient>> getAllInstancesByServerGroup() {
        if (duetClientPool == null) {
            return getAllInstancesByServerGroupInternal();
        }
        Map<ServerGroup, List<EVCacheClient>> allInstancesByServerGroup = new ConcurrentHashMap<>();
        allInstancesByServerGroup.putAll(getAllInstancesByServerGroupInternal());
        allInstancesByServerGroup.putAll(duetClientPool.getAllInstancesByServerGroup());
        return allInstancesByServerGroup;
    }

    /** Writable server-group name -> active server count for this pool only (no duet). */
    private Map<String, Integer> getWriteInstanceCountByZoneInternal() {
        final Map<String, Integer> instanceMap = new HashMap<String, Integer>();
        for (ServerGroup key : memcachedWriteInstancesByServerGroup.keySet()) {
            instanceMap.put(key.toString(), Integer.valueOf(memcachedWriteInstancesByServerGroup.get(key).get(0).getConnectionObserver().getActiveServerCount()));
        }
        return instanceMap;
    }

    /** Writable server-group name -> active server count (duet pool included). */
    public Map<String, Integer> getWriteInstanceCountByZone() {
        Map<String, Integer> instanceMap = getWriteInstanceCountByZoneInternal();
        if (duetClientPool != null) instanceMap.putAll(duetClientPool.getWriteInstanceCountByZone());
        return instanceMap;
    }

    /** Zone -> string rendering of its read iterator, for this pool only (no duet). */
    private Map<String, String> getReadServerGroupByZoneInternal() {
        final Map<String, String> instanceMap = new HashMap<String, String>();
        for (String key : readServerGroupByZone.keySet()) {
            instanceMap.put(key, readServerGroupByZone.get(key).toString());
        }
        return instanceMap;
    }

    /** Zone -> string rendering of its read iterator (duet pool included). */
    public Map<String, String> getReadServerGroupByZone() {
        Map<String, String> instanceMap = getReadServerGroupByZoneInternal();
        if (duetClientPool != null) instanceMap.putAll(duetClientPool.getReadServerGroupByZone());
        return instanceMap;
    }

    /** Forces a synchronous refresh of this pool (and the duet pool, when present). */
    public void refreshPool() {
        refreshPool(false, true);
        if (duetClientPool != null) duetClientPool.refreshPool(false, true);
    }

    /**
     * Refreshes the pool either inline or on the async executor.
     *
     * @param async run on the async refresh executor (skipped if a refresh is already queued)
     * @param force rebuild all server groups regardless of detected changes
     */
    public void refreshPool(boolean async, boolean force) {
        if (log.isDebugEnabled()) log.debug("Refresh Pool : async : " + async + "; force : " + force);
        try {
            // Only queue an async refresh when none is already pending; otherwise refresh inline.
            if(async && asyncRefreshExecutor.getQueue().size() == 0) {
                asyncRefreshExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            refresh(force);
                        } catch (Exception e) {
                            log.error(e.getMessage(), e);
                        }
                    }
                });
            } else {
                refresh(force);
            }
        } catch (Throwable t) {
            if (log.isDebugEnabled()) log.debug("Error Refreshing EVCache Instance list from MBean : " + _appName, t);
        }
        if (duetClientPool != null) duetClientPool.refreshPool(async,
                force);
    }

    /**
     * String form of the fallback read iterator; delegates to the duet pool only when this
     * pool has no fallback instances.
     */
    public String getFallbackServerGroup() {
        if (memcachedFallbackReadInstances.getSize() != 0 || duetClientPool == null) return memcachedFallbackReadInstances.toString();
        return duetClientPool.getFallbackServerGroup();
    }

    /** True when more than one readable group exists here, or the primary duet pool supports fallback. */
    public boolean supportsFallback() {
        return memcachedFallbackReadInstances.getSize() > 1
                || (duetClientPool != null && duetPrimary.get() && duetClientPool.supportsFallback());
    }

    /** True when operation logging is enabled (sample rate property > 0). */
    public boolean isLogEventEnabled() {
        return (logOperations.get() > 0);
    }

    /**
     * Sampled decision on whether to log a given operation: the op must be in the configured
     * op list, and the key hash (mod 1000) must fall within the sample-rate threshold.
     */
    public boolean shouldLogOperation(String key, String op) {
        if (!isLogEventEnabled()) return false;
        if (!logOperationCalls.get().contains(op)) return false;
        return key.hashCode() % 1000 <= logOperations.get();
    }

    @Override
    public String getLocalServerGroupCircularIterator() {
        // Fall through to the duet pool (or "NONE") when this pool has no local iterator.
        return (localServerGroupIterator == null)
                ? (duetClientPool == null ? "NONE" : duetClientPool.getLocalServerGroupCircularIterator())
                : localServerGroupIterator.toString();
    }

    @Override
    public String getEVCacheWriteClientsCircularIterator() {
        // Fall through to the duet pool (or "NONE") when the write-client array is not built.
        return (allEVCacheWriteClients == null) ? (duetClientPool == null ?
                "NONE" : duetClientPool.getEVCacheWriteClientsCircularIterator()) : allEVCacheWriteClients.toString();
    }

    /** Diagnostic dump of the pool; same as {@link #toString()}. */
    public String getPoolDetails() {
        return toString();
    }

    @Override
    public String toString() {
        return "\nEVCacheClientPool [\n\t_appName=" + _appName + ",\n\t_zone=" + _zone
                + ",\n\tlocalServerGroupIterator=" + localServerGroupIterator + ",\n\t_poolSize=" + _poolSize
                + ",\n\t_readTimeout=" + _readTimeout + ",\n\t_bulkReadTimeout=" + _bulkReadTimeout
                + ",\n\tlogOperations=" + logOperations + ",\n\t_opQueueMaxBlockTime=" + _opQueueMaxBlockTime
                + ",\n\t_operationTimeout=" + _operationTimeout + ",\n\t_maxReadQueueSize=" + _maxReadQueueSize
                + ",\n\t_pingServers=" + _pingServers + ",\n\twriteOnlyFastPropertyMap=" + writeOnlyFastPropertyMap
                + ",\n\tnumberOfModOps=" + numberOfModOps.get() + ",\n\t_shutdown=" + _shutdown
                + ",\n\tmemcachedInstancesByServerGroup=" + memcachedInstancesByServerGroup
                + ",\n\tmemcachedReadInstancesByServerGroup=" + memcachedReadInstancesByServerGroup
                + ",\n\tmemcachedWriteInstancesByServerGroup=" + memcachedWriteInstancesByServerGroup
                + ",\n\treadServerGroupByZone=" + readServerGroupByZone
                + ",\n\tmemcachedFallbackReadInstances=" + memcachedFallbackReadInstances + "\n]"
                + ", \n\tallEVCacheWriteClients=" + allEVCacheWriteClients + "\n]"
                + (duetClientPool == null ? "" : duetClientPool.toString());
    }

    /** Combined pool size of this pool and the duet pool (when present). */
    public int getPoolSize() {
        return _poolSize.get() + (duetClientPool == null ?
                0 : duetClientPool.getPoolSize());
    }

    /** Operation-logging sample-rate property. */
    public Property<Integer> getLogOperations() {
        return logOperations;
    }

    /** Max time (ms) an operation may block waiting for queue space. */
    public Property<Integer> getOpQueueMaxBlockTime() {
        return _opQueueMaxBlockTime;
    }

    /** Operation timeout; the duet pool's value wins when it is primary. */
    public Property<Integer> getOperationTimeout() {
        if (duetClientPool !=null && duetPrimary.get()) {
            return duetClientPool.getOperationTimeout();
        }
        return _operationTimeout;
    }

    /** Maximum read-queue length property. */
    public Property<Integer> getMaxReadQueueSize() {
        return _maxReadQueueSize;
    }

    /** Whether servers are pinged during each refresh. */
    public Property<Boolean> getPingServers() {
        return _pingServers;
    }

    /** Count of modify operations performed through this pool. */
    public long getNumberOfModOps() {
        return numberOfModOps.get();
    }

    /** True once {@code shutdown()} has been invoked. */
    public boolean isShutdown() {
        return _shutdown;
    }

    public String getZone() {
        return this._zone;
    }

    public String getAppName() {
        return this._appName;
    }

    public EVCacheClientPoolManager getEVCacheClientPoolManager() {
        return this.manager;
    }

    /** Write-only flags by server group, merged with the duet pool's when present. */
    public Map<ServerGroup, Property<Boolean>> getWriteOnlyFastPropertyMap() {
        if (duetClientPool != null) {
            Map<ServerGroup, Property<Boolean>> allMap = new ConcurrentHashMap<>();
            allMap.putAll(writeOnlyFastPropertyMap);
            allMap.putAll(duetClientPool.getWriteOnlyFastPropertyMap());
            return Collections.unmodifiableMap(allMap);
        }
        return Collections.unmodifiableMap(writeOnlyFastPropertyMap);
    }

    /** Read timeout; the duet pool's value wins when it is primary. */
    public Property<Integer> getReadTimeout() {
        if (duetClientPool != null && duetPrimary.get()) {
            return duetClientPool.getReadTimeout();
        }
        return _readTimeout;
    }

    /** Bulk-read timeout property. */
    public Property<Integer> getBulkReadTimeout() {
        return _bulkReadTimeout;
    }

    /*
     * This method is helpful in cases where there is typically a large backlog of work queued up, and is
     * expensive to loose all that work when a client is shut down.
     * Block the thread until all the queues are processed or at most 30 seconds.
     * Will return the count of items left in the queues. 0 means none left.
*/ public int join() { int size = 0; int counter = 0; do { for(List<EVCacheClient> clientList : getAllInstancesByServerGroup().values()) { for(EVCacheClient client : clientList) { size +=client.getWriteQueueLength(); size +=client.getReadQueueLength(); } } if(size > 0) { try { Thread.sleep(10); } catch (InterruptedException e) { log.error(""); } } if(counter++ > 3000) break; } while(size > 0); return size; } public long getLastReconcileTime() { return lastReconcileTime; } public Property<Set<String>> getOperationToLog() { return logOperationCalls; } }
4,043
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientPoolManager.java
package com.netflix.evcache.pool; import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.StringTokenizer; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import javax.annotation.PreDestroy; import javax.inject.Inject; import javax.inject.Singleton; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.archaius.api.Property; import com.netflix.evcache.EVCacheImpl; import com.netflix.evcache.EVCacheInMemoryCache; import com.netflix.evcache.connection.ConnectionFactoryBuilder; import com.netflix.evcache.connection.IConnectionBuilder; import com.netflix.evcache.event.EVCacheEventListener; import com.netflix.evcache.util.EVCacheConfig; import net.spy.memcached.transcoders.Transcoder; /** * A manager that holds Pools for each EVCache app. When this class is * initialized all the EVCache apps defined in the property evcache.appsToInit * will be initialized and added to the pool. If a service knows all the EVCache * app it uses, then it can define this property and pass a list of EVCache apps * that needs to be initialized. * * An EVCache app can also be initialized by Injecting * <code>EVCacheClientPoolManager</code> and calling <code> * initEVCache(<app name>) * </code> * * This typically should be done in the client libraries that need to initialize * an EVCache app. For Example VHSViewingHistoryLibrary in its initLibrary * initializes EVCACHE_VH by calling * * <pre> * {@literal @}Inject * public VHSViewingHistoryLibrary(EVCacheClientPoolManager instance,...) { * .... * instance.initEVCache("EVCACHE_VH"); * ... 
* } * </pre> * * @author smadappa * */ @edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "DM_CONVERT_CASE", "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" }) @Singleton public class EVCacheClientPoolManager { /** * <b>NOTE : Should be the only static referenced variables</b> * **/ private static final Logger log = LoggerFactory.getLogger(EVCacheClientPoolManager.class); private volatile static EVCacheClientPoolManager instance; private final Property<Integer> defaultReadTimeout; private final Property<String> logEnabledApps; private final Property<Integer> defaultRefreshInterval; private final Map<String, EVCacheClientPool> poolMap = new ConcurrentHashMap<String, EVCacheClientPool>(); private final Map<EVCacheClientPool, ScheduledFuture<?>> scheduledTaskMap = new HashMap<EVCacheClientPool, ScheduledFuture<?>>(); private final EVCacheScheduledExecutor asyncExecutor; private final EVCacheExecutor syncExecutor; private final List<EVCacheEventListener> evcacheEventListenerList; private final IConnectionBuilder connectionFactoryProvider; private final EVCacheNodeList evcacheNodeList; private final EVCacheConfig evcConfig; @Inject public EVCacheClientPoolManager(IConnectionBuilder connectionFactoryprovider, EVCacheNodeList evcacheNodeList, EVCacheConfig evcConfig) { instance = this; this.connectionFactoryProvider = connectionFactoryprovider; this.evcacheNodeList = evcacheNodeList; this.evcConfig = evcConfig; this.evcacheEventListenerList = new CopyOnWriteArrayList<EVCacheEventListener>(); String clientCurrentInstanceId = null; if(clientCurrentInstanceId == null) clientCurrentInstanceId= System.getenv("EC2_INSTANCE_ID"); if(clientCurrentInstanceId == null) clientCurrentInstanceId= System.getenv("NETFLIX_INSTANCE_ID"); if(log.isInfoEnabled()) log.info("\nClient Current InstanceId from env = " + clientCurrentInstanceId); if(clientCurrentInstanceId == null && EVCacheConfig.getInstance().getPropertyRepository() != null) 
clientCurrentInstanceId = EVCacheConfig.getInstance().getPropertyRepository().get("EC2_INSTANCE_ID", String.class).orElse(null).get(); if(clientCurrentInstanceId == null && EVCacheConfig.getInstance().getPropertyRepository() != null) clientCurrentInstanceId = EVCacheConfig.getInstance().getPropertyRepository().get("NETFLIX_INSTANCE_ID", String.class).orElse(null).get(); if(clientCurrentInstanceId != null && !clientCurrentInstanceId.equalsIgnoreCase("localhost")) { this.defaultReadTimeout = EVCacheConfig.getInstance().getPropertyRepository().get("default.read.timeout", Integer.class).orElse(20); if(log.isInfoEnabled()) log.info("\nClient Current InstanceId = " + clientCurrentInstanceId + " which is probably a cloud location. The default.read.timeout = " + defaultReadTimeout); } else { //Assuming this is not in cloud so bump up the timeouts this.defaultReadTimeout = EVCacheConfig.getInstance().getPropertyRepository().get("default.read.timeout", Integer.class).orElse(750); if(log.isInfoEnabled()) log.info("\n\nClient Current InstanceId = " + clientCurrentInstanceId + ". Probably a non-cloud instance. 
The default.read.timeout = " + defaultReadTimeout + "\n\n"); } this.logEnabledApps = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheClientPoolManager.log.apps", String.class).orElse("*"); this.defaultRefreshInterval = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheClientPoolManager.refresh.interval", Integer.class).orElse(60); this.asyncExecutor = new EVCacheScheduledExecutor(Runtime.getRuntime().availableProcessors(),Runtime.getRuntime().availableProcessors(), 30, TimeUnit.SECONDS, new ThreadPoolExecutor.CallerRunsPolicy(), "scheduled"); asyncExecutor.prestartAllCoreThreads(); this.syncExecutor = new EVCacheExecutor(Runtime.getRuntime().availableProcessors(),Runtime.getRuntime().availableProcessors(), 30, TimeUnit.SECONDS, new ThreadPoolExecutor.CallerRunsPolicy(), "pool"); syncExecutor.prestartAllCoreThreads(); initAtStartup(); } public IConnectionBuilder getConnectionFactoryProvider() { return connectionFactoryProvider; } public void addEVCacheEventListener(EVCacheEventListener listener) { this.evcacheEventListenerList.add(listener); } public void addEVCacheEventListener(EVCacheEventListener listener, int index) { if(index < evcacheEventListenerList.size()) { this.evcacheEventListenerList.add(index, listener); } else { this.evcacheEventListenerList.add(listener); } } public void removeEVCacheEventListener(EVCacheEventListener listener) { this.evcacheEventListenerList.remove(listener); } public List<EVCacheEventListener> getEVCacheEventListeners() { return this.evcacheEventListenerList; } public EVCacheConfig getEVCacheConfig() { return this.evcConfig; } /** * @deprecated. Please use DependencyInjection (@Inject) to obtain * {@link EVCacheClientPoolManager}. The use of this can result in * unintended behavior where you will not be able to talk to evcache * instances. 
*/ @Deprecated public static EVCacheClientPoolManager getInstance() { if (instance == null) { new EVCacheClientPoolManager(new ConnectionFactoryBuilder(), new SimpleNodeListProvider(), EVCacheConfig.getInstance()); if (!EVCacheConfig.getInstance().getPropertyRepository().get("evcache.use.simple.node.list.provider", Boolean.class).orElse(false).get()) { if(log.isDebugEnabled()) log.debug("Please make sure EVCacheClientPoolManager is injected first. This is not the appropriate way to init EVCacheClientPoolManager." + " If you are using simple node list provider please set evcache.use.simple.node.list.provider property to true.", new Exception()); } } return instance; } public void initAtStartup() { final String appsToInit = EVCacheConfig.getInstance().getPropertyRepository().get("evcache.appsToInit", String.class).orElse("").get(); if (appsToInit != null && appsToInit.length() > 0) { final StringTokenizer apps = new StringTokenizer(appsToInit, ","); while (apps.hasMoreTokens()) { final String app = getAppName(apps.nextToken()); if (log.isDebugEnabled()) log.debug("Initializing EVCache - " + app); initEVCache(app); } } } /** * Will init the given EVCache app call. If one is already initialized for * the given app method returns without doing anything. 
* * @param app * - name of the evcache app */ public final synchronized EVCacheClientPool initEVCache(String app) { return initEVCache(app, false); } public final synchronized EVCacheClientPool initEVCache(String app, boolean isDuet) { if (app == null || (app = app.trim()).length() == 0) throw new IllegalArgumentException("param app name null or space"); final String APP = getAppName(app); if (poolMap.containsKey(APP)) return poolMap.get(APP); final EVCacheClientPool pool = new EVCacheClientPool(APP, evcacheNodeList, asyncExecutor, this, isDuet); scheduleRefresh(pool); poolMap.put(APP, pool); return pool; } private void scheduleRefresh(EVCacheClientPool pool) { final ScheduledFuture<?> task = asyncExecutor.scheduleWithFixedDelay(pool, 30, defaultRefreshInterval.get(), TimeUnit.SECONDS); scheduledTaskMap.put(pool, task); } /** * Given the appName get the EVCacheClientPool. If the app is already * created then will return the existing instance. If not one will be * created and returned. * * @param _app * - name of the evcache app * @return the Pool for the give app. 
* @throws IOException */ public EVCacheClientPool getEVCacheClientPool(String _app) { final String app = getAppName(_app); final EVCacheClientPool evcacheClientPool = poolMap.get(app); if (evcacheClientPool != null) return evcacheClientPool; initEVCache(app); return poolMap.get(app); } public Map<String, EVCacheClientPool> getAllEVCacheClientPool() { return new HashMap<String, EVCacheClientPool>(poolMap); } @PreDestroy public void shutdown() { asyncExecutor.shutdown(); syncExecutor.shutdown(); for (EVCacheClientPool pool : poolMap.values()) { pool.shutdown(); } } public boolean shouldLog(String appName) { if ("*".equals(logEnabledApps.get())) return true; if (logEnabledApps.get().indexOf(appName) != -1) return true; return false; } public Property<Integer> getDefaultReadTimeout() { return defaultReadTimeout; } public Property<Integer> getDefaultRefreshInterval() { return defaultRefreshInterval; } public EVCacheScheduledExecutor getEVCacheScheduledExecutor() { return asyncExecutor; } public EVCacheExecutor getEVCacheExecutor() { return syncExecutor; } private String getAppName(String _app) { _app = _app.toUpperCase(); Boolean ignoreAlias = EVCacheConfig.getInstance().getPropertyRepository() .get("EVCacheClientPoolManager." + _app + ".ignoreAlias", Boolean.class) .orElseGet("EVCacheClientPoolManager.ignoreAlias") .orElse(false).get(); final String app = ignoreAlias ? _app : EVCacheConfig.getInstance().getPropertyRepository() .get("EVCacheClientPoolManager." 
+ _app + ".alias", String.class) .orElse(_app).get().toUpperCase(); if (log.isDebugEnabled()) log.debug("Original App Name : " + _app + "; Alias App Name : " + app); if(app != null && app.length() > 0) return app.toUpperCase(); return _app; } private WriteLock writeLock = new ReentrantReadWriteLock().writeLock(); private final Map<String, EVCacheInMemoryCache<?>> inMemoryMap = new ConcurrentHashMap<String, EVCacheInMemoryCache<?>>(); @SuppressWarnings("unchecked") public <T> EVCacheInMemoryCache<T> createInMemoryCache(Transcoder<T> tc, EVCacheImpl impl) { final String name = impl.getCachePrefix() == null ? impl.getAppName() : impl.getAppName() + impl.getCachePrefix(); EVCacheInMemoryCache<T> cache = (EVCacheInMemoryCache<T>) inMemoryMap.get(name); if(cache == null) { writeLock.lock(); if((cache = getInMemoryCache(name)) == null) { cache = new EVCacheInMemoryCache<T>(impl.getAppName(), tc, impl); inMemoryMap.put(name, cache); } writeLock.unlock(); } return cache; } @SuppressWarnings("unchecked") public <T> EVCacheInMemoryCache<T> getInMemoryCache(String appName) { return (EVCacheInMemoryCache<T>) inMemoryMap.get(appName); } }
4,044
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheExecutor.java
package com.netflix.evcache.pool; import java.lang.management.ManagementFactory; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import javax.management.MBeanServer; import javax.management.ObjectName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.netflix.archaius.api.Property; import com.netflix.evcache.metrics.EVCacheMetricsFactory; import com.netflix.evcache.util.EVCacheConfig; import com.netflix.spectator.api.patterns.ThreadPoolMonitor; public class EVCacheExecutor extends ThreadPoolExecutor implements EVCacheExecutorMBean { private static final Logger log = LoggerFactory.getLogger(EVCacheExecutor.class); private final Property<Integer> maxAsyncPoolSize; private final Property<Integer> coreAsyncPoolSize; private final String name; public EVCacheExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, RejectedExecutionHandler handler, String name) { super(corePoolSize, maximumPoolSize, keepAliveTime, unit, new LinkedBlockingQueue<Runnable>(), new ThreadFactoryBuilder().setDaemon(true).setNameFormat( "EVCacheExecutor-" + name + "-%d").build()); this.name = name; maxAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheExecutor." + name + ".max.size", Integer.class).orElse(maximumPoolSize); setMaximumPoolSize(maxAsyncPoolSize.get()); coreAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheExecutor." 
+ name + ".core.size", Integer.class).orElse(corePoolSize); setCorePoolSize(coreAsyncPoolSize.get()); setKeepAliveTime(keepAliveTime, unit); maxAsyncPoolSize.subscribe(this::setMaximumPoolSize); coreAsyncPoolSize.subscribe(i -> { setCorePoolSize(i); prestartAllCoreThreads(); }); setupMonitoring(name); ThreadPoolMonitor.attach(EVCacheMetricsFactory.getInstance().getRegistry(), this, EVCacheMetricsFactory.INTERNAL_EXECUTOR + "-" + name); } private void setupMonitoring(String name) { try { ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name); MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); if (mbeanServer.isRegistered(mBeanName)) { if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one."); mbeanServer.unregisterMBean(mBeanName); } mbeanServer.registerMBean(this, mBeanName); } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Exception", e); } } public void shutdown() { try { ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name); MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); mbeanServer.unregisterMBean(mBeanName); } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Exception", e); } super.shutdown(); } @Override public int getQueueSize() { return getQueue().size(); } }
4,045
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheScheduledExecutorMBean.java
package com.netflix.evcache.pool; public interface EVCacheScheduledExecutorMBean { boolean isShutdown(); boolean isTerminating(); boolean isTerminated(); int getCorePoolSize(); int getMaximumPoolSize(); int getQueueSize(); int getPoolSize(); int getActiveCount(); int getLargestPoolSize(); long getTaskCount(); long getCompletedTaskCount(); }
4,046
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheScheduledExecutor.java
package com.netflix.evcache.pool; import java.lang.management.ManagementFactory; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import javax.management.MBeanServer; import javax.management.ObjectName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.netflix.archaius.api.Property; import com.netflix.evcache.metrics.EVCacheMetricsFactory; import com.netflix.evcache.util.EVCacheConfig; import com.netflix.spectator.api.patterns.ThreadPoolMonitor; public class EVCacheScheduledExecutor extends ScheduledThreadPoolExecutor implements EVCacheScheduledExecutorMBean { private static final Logger log = LoggerFactory.getLogger(EVCacheScheduledExecutor.class); private final Property<Integer> maxAsyncPoolSize; private final Property<Integer> coreAsyncPoolSize; private final String name; public EVCacheScheduledExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, RejectedExecutionHandler handler, String name) { super(corePoolSize, handler); this.name = name; maxAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get(name + "executor.max.size", Integer.class).orElse(maximumPoolSize); setMaximumPoolSize(maxAsyncPoolSize.get()); coreAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get(name + "executor.core.size", Integer.class).orElse(corePoolSize); setCorePoolSize(coreAsyncPoolSize.get()); setKeepAliveTime(keepAliveTime, unit); final ThreadFactory asyncFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat( "EVCacheScheduledExecutor-" + name + "-%d").build(); setThreadFactory(asyncFactory); maxAsyncPoolSize.subscribe(this::setMaximumPoolSize); coreAsyncPoolSize.subscribe(i -> { setCorePoolSize(i); prestartAllCoreThreads(); }); setupMonitoring(name); 
ThreadPoolMonitor.attach(EVCacheMetricsFactory.getInstance().getRegistry(), this, EVCacheMetricsFactory.INTERNAL_EXECUTOR_SCHEDULED + "-" + name); } private void setupMonitoring(String name) { try { ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name); MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); if (mbeanServer.isRegistered(mBeanName)) { if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one."); mbeanServer.unregisterMBean(mBeanName); } mbeanServer.registerMBean(this, mBeanName); } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Exception", e); } } public void shutdown() { try { ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name); MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); mbeanServer.unregisterMBean(mBeanName); } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Exception", e); } super.shutdown(); } @Override public int getQueueSize() { return getQueue().size(); } }
4,047
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheExecutorMBean.java
package com.netflix.evcache.pool; public interface EVCacheExecutorMBean { boolean isShutdown(); boolean isTerminating(); boolean isTerminated(); int getCorePoolSize(); int getMaximumPoolSize(); int getQueueSize(); int getPoolSize(); int getActiveCount(); int getLargestPoolSize(); long getTaskCount(); long getCompletedTaskCount(); }
4,048
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheKetamaNodeLocatorConfiguration.java
package com.netflix.evcache.pool; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.util.HashMap; import java.util.Map; import com.netflix.archaius.api.Property; import com.netflix.evcache.util.EVCacheConfig; import net.spy.memcached.MemcachedNode; import net.spy.memcached.util.DefaultKetamaNodeLocatorConfiguration; public class EVCacheKetamaNodeLocatorConfiguration extends DefaultKetamaNodeLocatorConfiguration { protected final EVCacheClient client; protected final Property<Integer> bucketSize; protected final Map<MemcachedNode, String> socketAddresses = new HashMap<MemcachedNode, String>(); public EVCacheKetamaNodeLocatorConfiguration(EVCacheClient client) { this.client = client; this.bucketSize = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".bucket.size", Integer.class) .orElseGet(client.getAppName()+ ".bucket.size").orElse(super.getNodeRepetitions()); } /** * Returns the number of discrete hashes that should be defined for each * node in the continuum. * * @return NUM_REPS repetitions. */ public int getNodeRepetitions() { return bucketSize.get().intValue(); } /** * Returns the socket address of a given MemcachedNode. 
* * @param node - The MemcachedNode which we're interested in * @return The socket address of the given node format is of the following * For ec2 classic instances - "publicHostname/privateIp:port" (ex - ec2-174-129-159-31.compute-1.amazonaws.com/10.125.47.114:11211) * For ec2 vpc instances - "privateIp/privateIp:port" (ex - 10.125.47.114/10.125.47.114:11211) * privateIp is also known as local ip */ @Override public String getKeyForNode(MemcachedNode node, int repetition) { String result = socketAddresses.get(node); if(result == null) { final SocketAddress socketAddress = node.getSocketAddress(); if(socketAddress instanceof InetSocketAddress) { final InetSocketAddress isa = (InetSocketAddress)socketAddress; result = isa.getHostName() + '/' + isa.getAddress().getHostAddress() + ":11211"; } else { result=String.valueOf(socketAddress); if (result.startsWith("/")) { result = result.substring(1); } } socketAddresses.put(node, result); } return result + "-" + repetition; } @Override public String toString() { return "EVCacheKetamaNodeLocatorConfiguration [EVCacheClient=" + client + ", BucketSize=" + getNodeRepetitions() + "]"; } }
4,049
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/ChunkTranscoder.java
package com.netflix.evcache.pool; import net.spy.memcached.CachedData; import net.spy.memcached.transcoders.BaseSerializingTranscoder; import net.spy.memcached.transcoders.Transcoder; /** * A local transcoder used only by EVCache client to ensure we don't try to deserialize chunks * * @author smadappa * */ public class ChunkTranscoder extends BaseSerializingTranscoder implements Transcoder<CachedData> { public ChunkTranscoder() { super(Integer.MAX_VALUE); } public boolean asyncDecode(CachedData d) { return false; } public CachedData decode(CachedData d) { return d; } public CachedData encode(CachedData o) { return o; } public int getMaxSize() { return Integer.MAX_VALUE; } }
4,050
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheNodeList.java
package com.netflix.evcache.pool; import java.io.IOException; import java.net.UnknownServiceException; import java.util.Map; public interface EVCacheNodeList { /** * Discover memcached instances suitable for our use from the Discovery * Service. * * * @param appName The EVCache app for which we need instances * @throws UnknownServiceException * if no suitable instances can be found * @throws IllegalStateException * if an error occurred in the Discovery service * * TODO : Add a fallback to get the list say from PersistedProperties */ public abstract Map<ServerGroup, EVCacheServerGroupConfig> discoverInstances(String appName) throws IOException; }
4,051
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheNodeLocator.java
package com.netflix.evcache.pool;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import com.netflix.archaius.api.Property;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.util.EVCacheConfig;

import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.EVCacheMemcachedNodeROImpl;
import net.spy.memcached.HashAlgorithm;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
import net.spy.memcached.util.KetamaNodeLocatorConfiguration;

/**
 * Ketama (consistent-hash) node locator for an EVCache server group. Keys are hashed
 * onto a ring of virtual points (the {@code ketamaNodes} TreeMap); a key is served by
 * the first ring point at or clockwise-after its hash.
 */
public class EVCacheNodeLocator implements NodeLocator {

    private static final Logger log = LoggerFactory.getLogger(EVCacheNodeLocator.class);
    // The continuum: hash point -> node. Rebuilt wholesale by setKetamaNodes.
    private TreeMap<Long, MemcachedNode> ketamaNodes;
    protected final EVCacheClient client;

    // When enabled, only the key prefix before hashDelimiter is hashed, so related
    // keys land on the same node.
    private final Property<Boolean> partialStringHash;
    private final Property<String> hashDelimiter;

    private final Collection<MemcachedNode> allNodes;

    private final HashAlgorithm hashingAlgorithm;
    private final KetamaNodeLocatorConfiguration config;

    /**
     * Create a new KetamaNodeLocator using specified nodes and the specifed
     * hash algorithm and configuration.
     *
     * @param nodes
     *            The List of nodes to use in the Ketama consistent hash
     *            continuum
     * @param alg
     *            The hash algorithm to use when choosing a node in the Ketama
     *            consistent hash continuum
     * @param conf
     *            per-client ketama configuration (node repetitions, ring key format)
     */
    public EVCacheNodeLocator(EVCacheClient client, List<MemcachedNode> nodes, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) {
        super();
        this.allNodes = nodes;
        this.hashingAlgorithm = alg;
        this.config = conf;
        this.client = client;

        // Server-group-scoped property with an app-scoped fallback, then a hard default.
        this.partialStringHash = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.on.partial.key", Boolean.class)
                .orElseGet(client.getAppName()+ ".hash.on.partial.key").orElse(false);
        this.hashDelimiter = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.delimiter", String.class)
                .orElseGet(client.getAppName() + ".hash.delimiter").orElse(":");

        setKetamaNodes(nodes);
    }

    // Private copy constructor used by getReadonlyCopy(): takes an already-built
    // continuum (smn) instead of rebuilding it from a node list.
    private EVCacheNodeLocator(EVCacheClient client, TreeMap<Long, MemcachedNode> smn, Collection<MemcachedNode> an, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) {
        super();
        this.ketamaNodes = smn;
        this.allNodes = an;
        this.hashingAlgorithm = alg;
        this.config = conf;
        this.client = client;

        this.partialStringHash = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.on.partial.key", Boolean.class)
                .orElseGet(client.getAppName()+ ".hash.on.partial.key").orElse(false);
        this.hashDelimiter = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.delimiter", String.class)
                .orElseGet(client.getAppName() + ".hash.delimiter").orElse(":");
    }

    /*
     * @see net.spy.memcached.NodeLocator#getAll
     */
    public Collection<MemcachedNode> getAll() {
        return allNodes;
    }

    /*
     * @see net.spy.memcached.NodeLocator#getPrimary
     *
     * Hashes the key (optionally only its prefix before the delimiter) and returns the
     * node at the first ring point >= the hash, wrapping to the first entry when the
     * hash is past the last point.
     */
    public MemcachedNode getPrimary(String k) {
        if (partialStringHash.get()) {
            final int index = k.indexOf(hashDelimiter.get());
            if (index > 0) {
                k = k.substring(0, index);
            }
        }

        final long hash = hashingAlgorithm.hash(k);

        Map.Entry<Long, MemcachedNode> entry = ketamaNodes.ceilingEntry(hash);
        if (entry == null) {
            // Wrapped around the end of the ring.
            entry = ketamaNodes.firstEntry();
        }
        return entry.getValue();
    }

    /*
     * @return Returns the max key in the hashing distribution
     */
    public long getMaxKey() {
        return getKetamaNodes().lastKey().longValue();
    }

    // Resolve a pre-computed hash directly to a node; same wrap-around semantics as
    // getPrimary. Timing is logged (in microseconds) only at debug level.
    public MemcachedNode getNodeForKey(long _hash) {
        long start = (log.isDebugEnabled()) ? System.nanoTime() : 0;
        try {
            Long hash = Long.valueOf(_hash);
            hash = ketamaNodes.ceilingKey(hash);
            if (hash == null) {
                hash = ketamaNodes.firstKey();
            }
            return ketamaNodes.get(hash);
        } finally {
            if (log.isDebugEnabled()) {
                final long end = System.nanoTime();
                log.debug("getNodeForKey : \t" + (end - start) / 1000);
            }
        }
    }

    // Returns the fallback iteration order for a key: all ring nodes, shuffled.
    // NOTE(review): the key is ignored, so the sequence is random rather than
    // ketama-ordered — presumably intentional for load spreading; confirm.
    public Iterator<MemcachedNode> getSequence(String k) {
        final List<MemcachedNode> allKetamaNodes = new ArrayList<MemcachedNode>(getKetamaNodes().values());
        Collections.shuffle(allKetamaNodes);
        return allKetamaNodes.iterator();
    }

    // Deep-ish copy where every node is wrapped in a read-only view; the continuum
    // structure itself is copied so the original locator is untouched.
    public NodeLocator getReadonlyCopy() {
        final TreeMap<Long, MemcachedNode> ketamaNaodes = new TreeMap<Long, MemcachedNode>(getKetamaNodes());
        final Collection<MemcachedNode> aNodes = new ArrayList<MemcachedNode>(allNodes.size());

        // Rewrite the values a copy of the map.
        for (Map.Entry<Long, MemcachedNode> me : ketamaNaodes.entrySet()) {
            me.setValue(new EVCacheMemcachedNodeROImpl(me.getValue()));
        }
        // Copy the allNodes collection.
        for (MemcachedNode n : allNodes) {
            aNodes.add(new EVCacheMemcachedNodeROImpl(n));
        }

        return new EVCacheNodeLocator(client, ketamaNaodes, aNodes, hashingAlgorithm, config);
    }

    /**
     * @return the ketamaNodes
     */
    protected TreeMap<Long, MemcachedNode> getKetamaNodes() {
        return ketamaNodes;
    }

    /**
     * @return the readonly view of ketamaNodes. This is mailnly for admin
     *         purposes
     */
    public Map<Long, MemcachedNode> getKetamaNodeMap() {
        return Collections.<Long, MemcachedNode> unmodifiableMap(ketamaNodes);
    }

    /**
     * Setup the KetamaNodeLocator with the list of nodes it should use.
     *
     * Builds a fresh continuum and swaps it in with a single reference assignment at
     * the end, so readers never observe a half-built map.
     *
     * @param nodes
     *            a List of MemcachedNodes for this KetamaNodeLocator to use in
     *            its continuum
     */
    protected final void setKetamaNodes(List<MemcachedNode> nodes) {
        TreeMap<Long, MemcachedNode> newNodeMap = new TreeMap<Long, MemcachedNode>();
        final int numReps = config.getNodeRepetitions();
        for (MemcachedNode node : nodes) {
            // Ketama does some special work with md5 where it reuses chunks:
            // each 16-byte MD5 digest yields four 32-bit ring points, so only
            // numReps/4 digests are computed per node.
            if (hashingAlgorithm == DefaultHashAlgorithm.KETAMA_HASH) {
                for (int i = 0; i < numReps / 4; i++) {
                    final String hashString = config.getKeyForNode(node, i);
                    byte[] digest = DefaultHashAlgorithm.computeMd5(hashString);
                    // NOTE(review): this logs the byte[] reference (e.g. "[B@1a2b3c"),
                    // not its contents — Arrays.toString(digest) was probably intended.
                    if (log.isDebugEnabled()) log.debug("digest : " + digest);
                    for (int h = 0; h < 4; h++) {
                        // Assemble a little-endian unsigned 32-bit value from bytes
                        // [4h .. 4h+3] of the digest; masking keeps each byte unsigned.
                        long k = ((long) (digest[3 + h * 4] & 0xFF) << 24)
                                | ((long) (digest[2 + h * 4] & 0xFF) << 16)
                                | ((long) (digest[1 + h * 4] & 0xFF) << 8)
                                | (digest[h * 4] & 0xFF);
                        newNodeMap.put(Long.valueOf(k), node);
                        if (log.isDebugEnabled()) log.debug("Key : " + hashString + " ; hash : " + k + "; node " + node );
                    }
                }
            } else {
                // Generic algorithms: one ring point per repetition.
                for (int i = 0; i < numReps; i++) {
                    final Long hashL = Long.valueOf(hashingAlgorithm.hash(config.getKeyForNode(node, i)));
                    newNodeMap.put(hashL, node);
                }
            }
        }
        if (log.isDebugEnabled()) log.debug("NewNodeMapSize : " + newNodeMap.size() + "; MapSize : " + (numReps * nodes.size()));
        if (log.isTraceEnabled()) {
            for(Long key : newNodeMap.keySet()) {
                log.trace("Hash : " + key + "; Node : " + newNodeMap.get(key));
            }
        }
        ketamaNodes = newNodeMap;
    }

    @Override
    public void updateLocator(List<MemcachedNode> nodes) {
        setKetamaNodes(nodes);
    }

    @Override
    public String toString() {
        return "EVCacheNodeLocator [ketamaNodes=" + ketamaNodes + ", EVCacheClient=" + client + ", partialStringHash=" + partialStringHash
                + ", hashDelimiter=" + hashDelimiter + ", allNodes=" + allNodes + ", hashingAlgorithm=" + hashingAlgorithm + ", config=" + config + "]";
    }
}
4,052
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientUtil.java
package com.netflix.evcache.pool;

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import com.netflix.evcache.EVCacheKey;
import net.spy.memcached.transcoders.Transcoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.operation.EVCacheLatchImpl;

import net.spy.memcached.CachedData;

/**
 * Helper that performs an "add" across all replica clients of an app and, when
 * requested, repairs replicas where the key was missing by copying the existing
 * value from a replica that already has it.
 */
public class EVCacheClientUtil {
    private static final Logger log = LoggerFactory.getLogger(EVCacheClientUtil.class);
    // Pass-through transcoder so fixup() reads raw CachedData without deserializing.
    private final ChunkTranscoder ct = new ChunkTranscoder();
    private final String _appName;
    // Milliseconds to wait on the fixup latch (presumably ms — TODO confirm unit at call sites).
    private final long _operationTimeout;

    public EVCacheClientUtil(String appName, long operationTimeout) {
        this._appName = appName;
        this._operationTimeout = operationTimeout;
    }

    //TODO: Remove this todo. This method has been made hashing agnostic.
    /**
     * TODO : once metaget is available we need to get the remaining ttl from an existing entry and use it
     *
     * Submits an add to every client. For clients whose key is hashed, the value is
     * wrapped in an EVCacheValue (encoded once and reused across clients); otherwise the
     * raw CachedData is sent.
     *
     * When fixMissing is true, each add result is checked synchronously:
     * - if the FIRST checked add fails (key already present there), the latch is counted
     *   down for every client and returned as-is — the key is assumed to already exist;
     * - if a LATER add fails after an earlier success, the replicas disagree, so fixup()
     *   copies the existing value from the failing client to all clients.
     *
     * @param evcKey          key (canonical + per-client derived/hashed forms)
     * @param cd              value to add; null short-circuits to a null return
     * @param evcacheValueTranscoder transcoder used to encode the EVCacheValue wrapper for hashed keys
     * @param timeToLive      TTL in seconds for the new entry
     * @param policy          latch completion policy
     * @param clients         all replica clients to write to
     * @param latchCount      number of operations the returned latch waits on
     * @param fixMissing      whether to detect and repair replicas missing the key
     * @return the latch tracking the submitted operations, or null when cd is null
     * @throws Exception if an add future fails
     *         NOTE(review): f.get() below has no timeout — this can block the caller
     *         indefinitely if an operation hangs; confirm this is acceptable.
     */
    public EVCacheLatch add(EVCacheKey evcKey, final CachedData cd, Transcoder evcacheValueTranscoder, int timeToLive, Policy policy, final EVCacheClient[] clients, int latchCount, boolean fixMissing) throws Exception {
        if (cd == null) return null;

        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy, latchCount, _appName);
        CachedData cdHashed = null;
        Boolean firstStatus = null;
        for (EVCacheClient client : clients) {
            CachedData cd1;
            if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
                // Hashed key: store the value wrapped with its canonical key so the real
                // key can be recovered later. Encode once and reuse for all hashed clients.
                if(cdHashed == null) {
                    final EVCacheValue val = new EVCacheValue(evcKey.getCanonicalKey(client.isDuetClient()), cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis());
                    cdHashed = evcacheValueTranscoder.encode(val);
                }
                cd1 = cdHashed;
            } else {
                cd1 = cd;
            }
            String key = evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
            final Future<Boolean> f = client.add(key, timeToLive, cd1, latch);
            if (log.isDebugEnabled()) log.debug("ADD : Op Submitted : APP " + _appName + ", key " + key + "; future : " + f + "; client : " + client);
            if(fixMissing) {
                // Blocks until this client's add completes (no timeout — see note above).
                boolean status = f.get().booleanValue();
                if(!status) { // most common case
                    if(firstStatus == null) {
                        // First add already found the key: assume it exists everywhere,
                        // release the latch fully and stop submitting.
                        for(int i = 0; i < clients.length; i++) {
                            latch.countDown();
                        }
                        return latch;
                    } else {
                        // Earlier add succeeded but this one failed: replicas are out of
                        // sync — repair from the client that has the value.
                        return fixup(client, clients, evcKey, timeToLive, policy);
                    }
                }
                if(firstStatus == null) firstStatus = Boolean.valueOf(status);
            }
        }
        return latch;
    }

    /**
     * Copies the value for evcKey from sourceClient (which has it) to every client in
     * destClients, refreshing the source entry's TTL, and waits up to _operationTimeout
     * ms for the writes. Errors are logged, not propagated; the latch is returned either way.
     */
    private EVCacheLatch fixup(EVCacheClient sourceClient, EVCacheClient[] destClients, EVCacheKey evcKey, int timeToLive, Policy policy) {
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy, destClients.length, _appName);
        try {
            // Raw read (ChunkTranscoder) so the bytes are re-written verbatim.
            final CachedData readData = sourceClient.get(evcKey.getDerivedKey(sourceClient.isDuetClient(), sourceClient.getHashingAlgorithm(), sourceClient.shouldEncodeHashKey(), sourceClient.getMaxDigestBytes(), sourceClient.getMaxHashLength(), sourceClient.getBaseEncoder()), ct, false, false);
            if(readData != null) {
                // Reset the source entry's TTL to match what the add requested.
                sourceClient.touch(evcKey.getDerivedKey(sourceClient.isDuetClient(), sourceClient.getHashingAlgorithm(), sourceClient.shouldEncodeHashKey(), sourceClient.getMaxDigestBytes(), sourceClient.getMaxHashLength(), sourceClient.getBaseEncoder()), timeToLive);
                for(EVCacheClient destClient : destClients) {
                    destClient.set(evcKey.getDerivedKey(destClient.isDuetClient(), destClient.getHashingAlgorithm(), destClient.shouldEncodeHashKey(), destClient.getMaxDigestBytes(), destClient.getMaxHashLength(), destClient.getBaseEncoder()), readData, timeToLive, latch);
                }
            }
            latch.await(_operationTimeout, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            log.error("Error reading the data", e);
        }
        return latch;
    }
}
4,053
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClient.java
package com.netflix.evcache.pool; import java.io.BufferedInputStream; import java.io.IOException; import java.io.PrintWriter; import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.util.AbstractMap.SimpleEntry; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.*; import java.util.zip.CRC32; import java.util.zip.Checksum; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.archaius.api.Property; import com.netflix.evcache.EVCache; import com.netflix.evcache.EVCache.Call; import com.netflix.evcache.EVCacheConnectException; import com.netflix.evcache.EVCacheException; import com.netflix.evcache.EVCacheLatch; import com.netflix.evcache.EVCacheReadQueueException; import com.netflix.evcache.EVCacheSerializingTranscoder; import com.netflix.evcache.metrics.EVCacheMetricsFactory; import com.netflix.evcache.operation.EVCacheFutures; import com.netflix.evcache.operation.EVCacheItem; import com.netflix.evcache.operation.EVCacheItemMetaData; import com.netflix.evcache.operation.EVCacheLatchImpl; import com.netflix.evcache.pool.observer.EVCacheConnectionObserver; import com.netflix.evcache.util.EVCacheConfig; import com.netflix.evcache.util.KeyHasher; import com.netflix.evcache.util.KeyHasher.HashingAlgorithm; import com.netflix.spectator.api.BasicTag; import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.Tag; import net.spy.memcached.CASValue; import net.spy.memcached.CachedData; import net.spy.memcached.ConnectionFactory; import net.spy.memcached.EVCacheMemcachedClient; import net.spy.memcached.EVCacheNode; import net.spy.memcached.MemcachedClient; import net.spy.memcached.MemcachedNode; import net.spy.memcached.NodeLocator; import 
net.spy.memcached.internal.ListenableFuture; import net.spy.memcached.internal.OperationCompletionListener; import net.spy.memcached.internal.OperationFuture; import net.spy.memcached.transcoders.Transcoder; import rx.Scheduler; import rx.Single; @SuppressWarnings({"rawtypes", "unchecked"}) @edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "REC_CATCH_EXCEPTION", "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" }) public class EVCacheClient { private static final Logger log = LoggerFactory.getLogger(EVCacheClient.class); private final ConnectionFactory connectionFactory; private final EVCacheMemcachedClient evcacheMemcachedClient; private final List<InetSocketAddress> memcachedNodesInZone; private EVCacheConnectionObserver connectionObserver = null; private boolean shutdown = false; private final int id; private final String appName; private final String zone; private final ServerGroup serverGroup; private final EVCacheServerGroupConfig config; private final int maxWriteQueueSize; private final Property<Integer> readTimeout; private final Property<Integer> bulkReadTimeout; private final Property<Integer> maxReadQueueSize; private final Property<Boolean> ignoreInactiveNodes; private final Property<Boolean> enableChunking; private final Property<Boolean> hashKeyByServerGroup; private final Property<Boolean> shouldEncodeHashKey; private final Property<Integer> maxDigestBytes; private final Property<Integer> maxHashLength; private final Property<Integer> chunkSize, writeBlock; private final Property<String> encoderBase; private final ChunkTranscoder chunkingTranscoder; private final EVCacheSerializingTranscoder decodingTranscoder; private static final int SPECIAL_BYTEARRAY = (8 << 8); private final EVCacheClientPool pool; // private Counter addCounter = null; private final Property<Boolean> ignoreTouch; private List<Tag> tags; private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>(); private final Property<String> hashingAlgo; protected 
final Counter operationsCounter; private final boolean isDuetClient; EVCacheClient(String appName, String zone, int id, EVCacheServerGroupConfig config, List<InetSocketAddress> memcachedNodesInZone, int maxQueueSize, Property<Integer> maxReadQueueSize, Property<Integer> readTimeout, Property<Integer> bulkReadTimeout, Property<Integer> opQueueMaxBlockTime, Property<Integer> operationTimeout, EVCacheClientPool pool, boolean isDuetClient) throws IOException { this.memcachedNodesInZone = memcachedNodesInZone; this.id = id; this.appName = appName; this.zone = zone; this.config = config; this.serverGroup = config.getServerGroup(); this.readTimeout = readTimeout; this.bulkReadTimeout = bulkReadTimeout; this.maxReadQueueSize = maxReadQueueSize; // this.operationTimeout = operationTimeout; this.pool = pool; this.isDuetClient = isDuetClient; final List<Tag> tagList = new ArrayList<Tag>(4); EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, appName); tagList.add(new BasicTag(EVCacheMetricsFactory.CONNECTION_ID, String.valueOf(id))); tagList.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName())); this.tags = Collections.<Tag>unmodifiableList(new ArrayList(tagList)); tagList.add(new BasicTag(EVCacheMetricsFactory.STAT_NAME, EVCacheMetricsFactory.POOL_OPERATIONS)); operationsCounter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_STATS, tagList); this.enableChunking = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName()+ ".chunk.data", Boolean.class).orElseGet(appName + ".chunk.data").orElse(false); this.chunkSize = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName() + ".chunk.size", Integer.class).orElseGet(appName + ".chunk.size").orElse(1180); this.writeBlock = EVCacheConfig.getInstance().getPropertyRepository().get(appName + "." 
+ this.serverGroup.getName() + ".write.block.duration", Integer.class).orElseGet(appName + ".write.block.duration").orElse(25); this.chunkingTranscoder = new ChunkTranscoder(); this.maxWriteQueueSize = maxQueueSize; this.ignoreTouch = EVCacheConfig.getInstance().getPropertyRepository().get(appName + "." + this.serverGroup.getName() + ".ignore.touch", Boolean.class).orElseGet(appName + ".ignore.touch").orElse(false); this.connectionFactory = pool.getEVCacheClientPoolManager().getConnectionFactoryProvider().getConnectionFactory(this); this.connectionObserver = new EVCacheConnectionObserver(this); this.ignoreInactiveNodes = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".ignore.inactive.nodes", Boolean.class).orElse(true); this.evcacheMemcachedClient = new EVCacheMemcachedClient(connectionFactory, memcachedNodesInZone, readTimeout, this); this.evcacheMemcachedClient.addObserver(connectionObserver); this.decodingTranscoder = new EVCacheSerializingTranscoder(Integer.MAX_VALUE); decodingTranscoder.setCompressionThreshold(Integer.MAX_VALUE); this.hashKeyByServerGroup = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName() + ".hash.key", Boolean.class).orElse(null); this.hashingAlgo = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName() + ".hash.algo", String.class).orElseGet(appName + ".hash.algo").orElse("siphash24"); this.shouldEncodeHashKey = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName() + ".hash.encode", Boolean.class).orElse(null); this.maxDigestBytes = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName() + ".max.digest.bytes", Integer.class).orElse(null); this.maxHashLength = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName() + ".max.hash.length", Integer.class).orElse(null); this.encoderBase = EVCacheConfig.getInstance().getPropertyRepository().get(this.serverGroup.getName() + 
".hash.encoder", String.class).orElse("base64"); ping(); } public void ping() { try { final Map<SocketAddress, String> versions = getVersions(); for (Entry<SocketAddress, String> vEntry : versions.entrySet()) { if (log.isDebugEnabled()) log.debug("Host : " + vEntry.getKey() + " : " + vEntry.getValue()); } } catch (Throwable t) { log.error("Error while pinging the servers", t); } } public boolean isDuetClient() { return isDuetClient; } public Boolean shouldEncodeHashKey() { return this.shouldEncodeHashKey.get(); } public String getBaseEncoder() { return this.encoderBase.get(); } public Integer getMaxDigestBytes() { return this.maxDigestBytes.get(); } public Integer getMaxHashLength() { return this.maxHashLength.get(); } private Collection<String> validateReadQueueSize(Collection<String> canonicalKeys, EVCache.Call call) { if (evcacheMemcachedClient.getNodeLocator() == null) return canonicalKeys; final Collection<String> retKeys = new ArrayList<>(canonicalKeys.size()); for (String key : canonicalKeys) { final MemcachedNode node = evcacheMemcachedClient.getNodeLocator().getPrimary(key); if (node instanceof EVCacheNode) { final EVCacheNode evcNode = (EVCacheNode) node; if (!evcNode.isAvailable(call)) { continue; } final int size = evcNode.getReadQueueSize(); final boolean canAddToOpQueue = size < (maxReadQueueSize.get() * 2); // if (log.isDebugEnabled()) log.debug("Bulk Current Read Queue // Size - " + size + " for app " + appName + " & zone " + zone + // " ; node " + node); if (!canAddToOpQueue) { final String hostName; if(evcNode.getSocketAddress() instanceof InetSocketAddress) { hostName = ((InetSocketAddress)evcNode.getSocketAddress()).getHostName(); } else { hostName = evcNode.getSocketAddress().toString(); } incrementFailure(EVCacheMetricsFactory.READ_QUEUE_FULL, call, hostName); if (log.isDebugEnabled()) log.debug("Read Queue Full on Bulk Operation for app : " + appName + "; zone : " + zone + "; Current Size : " + size + "; Max Size : " + maxReadQueueSize.get() 
* 2); } else { retKeys.add(key); } } } return retKeys; } private void incrementFailure(String metric, EVCache.Call call) { incrementFailure(metric, call, null); } private void incrementFailure(String metric, EVCache.Call call, String host) { Counter counter = counterMap.get(metric); if(counter == null) { final List<Tag> tagList = new ArrayList<Tag>(6); tagList.addAll(tags); if(call != null) { tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, call.name())); switch(call) { case GET: case GETL: case GET_AND_TOUCH: case ASYNC_GET: case BULK: case GET_ALL: tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.READ)); break; default : tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.WRITE)); break; } } tagList.add(new BasicTag(EVCacheMetricsFactory.FAILURE_REASON, metric)); if(host != null) tagList.add(new BasicTag(EVCacheMetricsFactory.FAILED_HOST, host)); counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_FAIL, tagList); counterMap.put(metric, counter); } counter.increment(); } public void reportWrongKeyReturned(String hostName) { incrementFailure(EVCacheMetricsFactory.WRONG_KEY_RETURNED, null, hostName); } private boolean ensureWriteQueueSize(MemcachedNode node, String key, EVCache.Call call) throws EVCacheException { if (node instanceof EVCacheNode) { final EVCacheNode evcNode = (EVCacheNode) node; int i = 0; while (true) { final int size = evcNode.getWriteQueueSize(); final boolean canAddToOpQueue = size < maxWriteQueueSize; if (log.isDebugEnabled()) log.debug("App : " + appName + "; zone : " + zone + "; key : " + key + "; WriteQSize : " + size); if (canAddToOpQueue) break; try { Thread.sleep(writeBlock.get()); } catch (InterruptedException e) { throw new EVCacheException("Thread was Interrupted", e); } if(i++ > 3) { final String hostName; if(evcNode.getSocketAddress() instanceof InetSocketAddress) { hostName = 
((InetSocketAddress)evcNode.getSocketAddress()).getHostName(); } else { hostName = evcNode.getSocketAddress().toString(); } incrementFailure(EVCacheMetricsFactory.INACTIVE_NODE, call, hostName); if (log.isDebugEnabled()) log.debug("Node : " + evcNode + " for app : " + appName + "; zone : " + zone + " is not active. Will Fail Fast and the write will be dropped for key : " + key); evcNode.shutdown(); return false; } } } return true; } private boolean validateNode(String key, boolean _throwException, EVCache.Call call) throws EVCacheException, EVCacheConnectException { final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key); // First check if the node is active if (node instanceof EVCacheNode) { final EVCacheNode evcNode = (EVCacheNode) node; final String hostName; if(evcNode.getSocketAddress() instanceof InetSocketAddress) { hostName = ((InetSocketAddress)evcNode.getSocketAddress()).getHostName(); } else { hostName = evcNode.getSocketAddress().toString(); } if (!evcNode.isAvailable(call)) { incrementFailure(EVCacheMetricsFactory.INACTIVE_NODE, call, hostName); if (log.isDebugEnabled()) log.debug("Node : " + node + " for app : " + appName + "; zone : " + zone + " is not active. 
Will Fail Fast so that we can fallback to Other Zone if available."); if (_throwException) throw new EVCacheConnectException("Connection for Node : " + node + " for app : " + appName + "; zone : " + zone + " is not active"); return false; } final int size = evcNode.getReadQueueSize(); final boolean canAddToOpQueue = size < maxReadQueueSize.get(); if (log.isDebugEnabled()) log.debug("Current Read Queue Size - " + size + " for app " + appName + " & zone " + zone + " and node : " + evcNode); if (!canAddToOpQueue) { incrementFailure(EVCacheMetricsFactory.READ_QUEUE_FULL, call, hostName); if (log.isDebugEnabled()) log.debug("Read Queue Full for Node : " + node + "; app : " + appName + "; zone : " + zone + "; Current Size : " + size + "; Max Size : " + maxReadQueueSize.get()); if (_throwException) throw new EVCacheReadQueueException("Read Queue Full for Node : " + node + "; app : " + appName + "; zone : " + zone + "; Current Size : " + size + "; Max Size : " + maxReadQueueSize.get()); return false; } } return true; } private <T> ChunkDetails<T> getChunkDetails(String key) { final List<String> firstKeys = new ArrayList<String>(2); firstKeys.add(key); final String firstKey = key + "_00"; firstKeys.add(firstKey); try { final Map<String, CachedData> metadataMap = evcacheMemcachedClient.asyncGetBulk(firstKeys, chunkingTranscoder, null) .getSome(readTimeout.get(), TimeUnit.MILLISECONDS, false, false); if (metadataMap.containsKey(key)) { return new ChunkDetails(null, null, false, metadataMap.get(key)); } else if (metadataMap.containsKey(firstKey)) { final ChunkInfo ci = getChunkInfo(firstKey, (String) decodingTranscoder.decode(metadataMap.get(firstKey))); if (ci == null) return null; final List<String> keys = new ArrayList<>(); for (int i = 1; i < ci.getChunks(); i++) { final String prefix = (i < 10) ? 
"0" : ""; keys.add(ci.getKey() + "_" + prefix + i); } return new ChunkDetails(keys, ci, true, null); } else { return null; } } catch (Exception e) { log.error(e.getMessage(), e); } return null; } private <T> Single<ChunkDetails<T>> getChunkDetails(String key, Scheduler scheduler) { final List<String> firstKeys = new ArrayList<>(2); firstKeys.add(key); final String firstKey = key + "_00"; firstKeys.add(firstKey); return evcacheMemcachedClient.asyncGetBulk(firstKeys, chunkingTranscoder, null) .getSome(readTimeout.get(), TimeUnit.MILLISECONDS, false, false, scheduler) .map(metadataMap -> { if (metadataMap.containsKey(key)) { return new ChunkDetails(null, null, false, metadataMap.get(key)); } else if (metadataMap.containsKey(firstKey)) { final ChunkInfo ci = getChunkInfo(firstKey, (String) decodingTranscoder.decode(metadataMap.get(firstKey))); if (ci == null) return null; final List<String> keys = new ArrayList<>(); for (int i = 1; i < ci.getChunks(); i++) { final String prefix = (i < 10) ? "0" : ""; keys.add(ci.getKey() + "_" + prefix + i); } return new ChunkDetails(keys, ci, true, null); } else { return null; } }); } private <T> T assembleChunks(String key, boolean touch, int ttl, Transcoder<T> tc, boolean hasZF) { try { final ChunkDetails<T> cd = getChunkDetails(key); if (cd == null) return null; if (!cd.isChunked()) { if (cd.getData() == null) return null; final Transcoder<T> transcoder = (tc == null ? 
(Transcoder<T>) evcacheMemcachedClient.getTranscoder() : tc); return transcoder.decode((CachedData) cd.getData()); } else { final List<String> keys = cd.getChunkKeys(); final ChunkInfo ci = cd.getChunkInfo(); final Map<String, CachedData> dataMap = evcacheMemcachedClient.asyncGetBulk(keys, chunkingTranscoder, null) .getSome(readTimeout.get(), TimeUnit.MILLISECONDS, false, false); if (dataMap.size() != ci.getChunks() - 1) { incrementFailure(EVCacheMetricsFactory.INCORRECT_CHUNKS, null); return null; } final byte[] data = new byte[(ci.getChunks() - 2) * ci.getChunkSize() + (ci.getLastChunk() == 0 ? ci .getChunkSize() : ci.getLastChunk())]; int index = 0; for (int i = 0; i < keys.size(); i++) { final String _key = keys.get(i); final CachedData _cd = dataMap.get(_key); if (log.isDebugEnabled()) log.debug("Chunk Key " + _key + "; Value : " + _cd); if (_cd == null) continue; final byte[] val = _cd.getData(); // If we expect a chunk to be present and it is null then return null immediately. if (val == null) return null; final int len = (i == keys.size() - 1) ? ((ci.getLastChunk() == 0 || ci.getLastChunk() > ci .getChunkSize()) ? 
ci.getChunkSize() : ci.getLastChunk()) : val.length; if (len != ci.getChunkSize() && i != keys.size() - 1) { incrementFailure(EVCacheMetricsFactory.INVALID_CHUNK_SIZE, null); if (log.isWarnEnabled()) log.warn("CHUNK_SIZE_ERROR : Chunks : " + ci.getChunks() + " ; " + "length : " + len + "; expectedLength : " + ci.getChunkSize() + " for key : " + _key); } if (len > 0) { try { System.arraycopy(val, 0, data, index, len); } catch (Exception e) { StringBuilder sb = new StringBuilder(); sb.append("ArrayCopyError - Key : " + _key + "; final data Size : " + data.length + "; copy array size : " + len + "; val size : " + val.length + "; key index : " + i + "; copy from : " + index + "; ChunkInfo : " + ci + "\n"); for (int j = 0; j < keys.size(); j++) { final String skey = keys.get(j); final byte[] sval = (byte[]) dataMap.get(skey).getData(); sb.append(skey + "=" + sval.length + "\n"); } if (log.isWarnEnabled()) log.warn(sb.toString(), e); throw e; } index += val.length; if (touch) evcacheMemcachedClient.touch(_key, ttl); } } final boolean checksumPass = checkCRCChecksum(data, ci, hasZF); if (!checksumPass) return null; final Transcoder<T> transcoder = (tc == null ? (Transcoder<T>) evcacheMemcachedClient.getTranscoder() : tc); return transcoder.decode(new CachedData(ci.getFlags(), data, Integer.MAX_VALUE)); } } catch (Exception e) { log.error(e.getMessage(), e); } return null; } private <T> Single<T> assembleChunks(String key, boolean touch, int ttl, Transcoder<T> tc, boolean hasZF, Scheduler scheduler) { return getChunkDetails(key, scheduler).flatMap(cd -> { if (cd == null) return Single.just(null); if (!cd.isChunked()) { if (cd.getData() == null) return Single.just(null); final Transcoder<T> transcoder = (tc == null ? 
(Transcoder<T>) evcacheMemcachedClient.getTranscoder() : tc); return Single.just(transcoder.decode((CachedData) cd.getData())); } else { final List<String> keys = cd.getChunkKeys(); final ChunkInfo ci = cd.getChunkInfo(); return evcacheMemcachedClient.asyncGetBulk(keys, chunkingTranscoder, null) .getSome(readTimeout.get(), TimeUnit.MILLISECONDS, false, false, scheduler) .map(dataMap -> { if (dataMap.size() != ci.getChunks() - 1) { incrementFailure(EVCacheMetricsFactory.INCORRECT_CHUNKS, null); return null; } final byte[] data = new byte[(ci.getChunks() - 2) * ci.getChunkSize() + (ci.getLastChunk() == 0 ? ci .getChunkSize() : ci.getLastChunk())]; int index = 0; for (int i = 0; i < keys.size(); i++) { final String _key = keys.get(i); final CachedData _cd = dataMap.get(_key); if (log.isDebugEnabled()) log.debug("Chunk Key " + _key + "; Value : " + _cd); if (_cd == null) continue; final byte[] val = _cd.getData(); // If we expect a chunk to be present and it is null then return null immediately. if (val == null) return null; final int len = (i == keys.size() - 1) ? ((ci.getLastChunk() == 0 || ci.getLastChunk() > ci .getChunkSize()) ? 
ci.getChunkSize() : ci.getLastChunk()) : val.length; if (len != ci.getChunkSize() && i != keys.size() - 1) { incrementFailure(EVCacheMetricsFactory.INVALID_CHUNK_SIZE, null); if (log.isWarnEnabled()) log.warn("CHUNK_SIZE_ERROR : Chunks : " + ci.getChunks() + " ; " + "length : " + len + "; expectedLength : " + ci.getChunkSize() + " for key : " + _key); } if (len > 0) { try { System.arraycopy(val, 0, data, index, len); } catch (Exception e) { StringBuilder sb = new StringBuilder(); sb.append("ArrayCopyError - Key : " + _key + "; final data Size : " + data.length + "; copy array size : " + len + "; val size : " + val.length + "; key index : " + i + "; copy from : " + index + "; ChunkInfo : " + ci + "\n"); for (int j = 0; j < keys.size(); j++) { final String skey = keys.get(j); final byte[] sval = (byte[]) dataMap.get(skey).getData(); sb.append(skey + "=" + sval.length + "\n"); } if (log.isWarnEnabled()) log.warn(sb.toString(), e); throw e; } System.arraycopy(val, 0, data, index, len); index += val.length; if (touch) evcacheMemcachedClient.touch(_key, ttl); } } final boolean checksumPass = checkCRCChecksum(data, ci, hasZF); if (!checksumPass) return null; final Transcoder<T> transcoder = (tc == null ? 
(Transcoder<T>) evcacheMemcachedClient.getTranscoder() : tc);
                            // Decode with the flags recorded in the metadata chunk so the value round-trips
                            // through the same transcoder flags it was written with.
                            return transcoder.decode(new CachedData(ci.getFlags(), data, Integer.MAX_VALUE));
                        });
            }
        });
    }

    /**
     * Verifies a reassembled chunked payload against the CRC32 checksum stored in its
     * metadata chunk.
     *
     * @param data  the reassembled value; null or empty data always fails the check
     * @param ci    chunk metadata carrying the expected checksum
     * @param hasZF whether a zone fallback is available; when false a mismatch is logged
     *              and counted as a CHECK_SUM_ERROR, otherwise the mismatch is silent
     * @return true only if the computed CRC32 matches the expected checksum
     */
    private boolean checkCRCChecksum(byte[] data, final ChunkInfo ci, boolean hasZF) {
        if (data == null || data.length == 0) return false;
        final Checksum checksum = new CRC32();
        checksum.update(data, 0, data.length);
        final long currentChecksum = checksum.getValue();
        final long expectedChecksum = ci.getChecksum();
        if (log.isDebugEnabled()) log.debug("CurrentChecksum : " + currentChecksum + "; ExpectedChecksum : "
                + expectedChecksum + " for key : " + ci.getKey());
        if (currentChecksum != expectedChecksum) {
            if (!hasZF) {
                if (log.isWarnEnabled()) log.warn("CHECKSUM_ERROR : Chunks : " + ci.getChunks() + " ; "
                        + "currentChecksum : " + currentChecksum + "; expectedChecksum : " + expectedChecksum
                        + " for key : " + ci.getKey());
                incrementFailure(EVCacheMetricsFactory.CHECK_SUM_ERROR, null);
            }
            return false;
        }
        return true;
    }

    /**
     * Parses the metadata chunk string ("chunks:chunkSize:lastChunk:flags:checksum")
     * for a chunked key. The base key is recovered by stripping the 3-character "_00"
     * suffix from firstKey.
     *
     * @param firstKey the metadata-chunk key (base key + "_00")
     * @param metadata colon-separated metadata; must contain exactly 5 fields
     * @return the parsed ChunkInfo, or null when metadata is null or malformed
     */
    private ChunkInfo getChunkInfo(String firstKey, String metadata) {
        if (metadata == null) return null;
        final String[] metaItems = metadata.split(":");
        if (metaItems.length != 5) return null;
        // Strip the "_00" suffix to get back the caller-visible key.
        final String key = firstKey.substring(0, firstKey.length() - 3);
        final ChunkInfo ci = new ChunkInfo(Integer.parseInt(metaItems[0]), Integer.parseInt(metaItems[1]), Integer
                .parseInt(metaItems[2]), Integer.parseInt(metaItems[3]), key, Long
                        .parseLong(metaItems[4]));
        return ci;
    }

    /**
     * Bulk-fetches possibly-chunked values and reassembles any chunked entries
     * (blocking variant). For each requested key both the key itself and its metadata
     * key (key + "_00") are fetched in one bulk call; plain hits are decoded directly,
     * chunked hits are reassembled from their data chunks in a second bulk call.
     */
    private <T> Map<String, T> assembleChunks(Collection<String> keyList, Transcoder<T> tc, boolean hasZF) {
        final List<String> firstKeys = new ArrayList<>();
        for (String key : keyList) {
            firstKeys.add(key);
            firstKeys.add(key + "_00");
        }
        try {
            final Map<String, CachedData> metadataMap = evcacheMemcachedClient.asyncGetBulk(firstKeys, chunkingTranscoder, null)
                    .getSome(bulkReadTimeout.get(), TimeUnit.MILLISECONDS, false, false);
            if (metadataMap == null) return null;
            final Map<String, T> returnMap = new HashMap<>(keyList.size() * 2);
            for (String key :
keyList) { if (metadataMap.containsKey(key)) { CachedData val = metadataMap.remove(key); returnMap.put(key, tc.decode(val)); } } final List<String> allKeys = new ArrayList<>(); final Map<ChunkInfo, SimpleEntry<List<String>, byte[]>> responseMap = new HashMap<>(); for (Entry<String, CachedData> entry : metadataMap.entrySet()) { final String firstKey = entry.getKey(); final String metadata = (String) decodingTranscoder.decode(entry.getValue()); if (metadata == null) continue; final ChunkInfo ci = getChunkInfo(firstKey, metadata); if (ci != null) { final List<String> ciKeys = new ArrayList<>(); for (int i = 1; i < ci.getChunks(); i++) { final String prefix = (i < 10) ? "0" : ""; final String _key = ci.getKey() + "_" + prefix + i; allKeys.add(_key); ciKeys.add(_key); } final byte[] data = new byte[(ci.getChunks() - 2) * ci.getChunkSize() + ci.getLastChunk()]; responseMap.put(ci, new SimpleEntry<>(ciKeys, data)); } } final Map<String, CachedData> dataMap = evcacheMemcachedClient.asyncGetBulk(allKeys, chunkingTranscoder, null) .getSome(bulkReadTimeout.get(), TimeUnit.MILLISECONDS, false, false); for (Entry<ChunkInfo, SimpleEntry<List<String>, byte[]>> entry : responseMap.entrySet()) { final ChunkInfo ci = entry.getKey(); final SimpleEntry<List<String>, byte[]> pair = entry.getValue(); final List<String> ciKeys = pair.getKey(); byte[] data = pair.getValue(); int index = 0; for (int i = 0; i < ciKeys.size(); i++) { final String _key = ciKeys.get(i); final CachedData cd = dataMap.get(_key); if (log.isDebugEnabled()) log.debug("Chunk Key " + _key + "; Value : " + cd); if (cd == null) continue; final byte[] val = cd.getData(); if (val == null) { data = null; break; } final int len = (i == ciKeys.size() - 1) ? ((ci.getLastChunk() == 0 || ci.getLastChunk() > ci .getChunkSize()) ? 
ci.getChunkSize() : ci.getLastChunk()) : val.length; try { System.arraycopy(val, 0, data, index, len); } catch (Exception e) { StringBuilder sb = new StringBuilder(); sb.append("ArrayCopyError - Key : " + _key + "; final data Size : " + data.length + "; copy array size : " + len + "; val size : " + val.length + "; key index : " + i + "; copy from : " + index + "; ChunkInfo : " + ci + "\n"); for (int j = 0; j < ciKeys.size(); j++) { final String skey = ciKeys.get(j); final byte[] sval = dataMap.get(skey).getData(); sb.append(skey + "=" + sval.length + "\n"); } if (log.isWarnEnabled()) log.warn(sb.toString(), e); throw e; } index += val.length; } final boolean checksumPass = checkCRCChecksum(data, ci, hasZF); if (data != null && checksumPass) { final CachedData cd = new CachedData(ci.getFlags(), data, Integer.MAX_VALUE); returnMap.put(ci.getKey(), tc.decode(cd)); } else { returnMap.put(ci.getKey(), null); } } return returnMap; } catch (Exception e) { log.error(e.getMessage(), e); } return null; } private <T> Single<Map<String, T>> assembleChunks(Collection<String> keyList, Transcoder<T> tc, boolean hasZF, Scheduler scheduler) { final List<String> firstKeys = new ArrayList<>(); for (String key : keyList) { firstKeys.add(key); firstKeys.add(key + "_00"); } return evcacheMemcachedClient.asyncGetBulk(firstKeys, chunkingTranscoder, null) .getSome(bulkReadTimeout.get(), TimeUnit.MILLISECONDS, false, false, scheduler) .flatMap(metadataMap -> { if (metadataMap == null) return null; final Map<String, T> returnMap = new HashMap<>(keyList.size() * 2); for (String key : keyList) { if (metadataMap.containsKey(key)) { CachedData val = metadataMap.remove(key); returnMap.put(key, tc.decode(val)); } } final List<String> allKeys = new ArrayList<>(); final Map<ChunkInfo, SimpleEntry<List<String>, byte[]>> responseMap = new HashMap<>(); for (Entry<String, CachedData> entry : metadataMap.entrySet()) { final String firstKey = entry.getKey(); final String metadata = (String) 
decodingTranscoder.decode(entry.getValue()); if (metadata == null) continue; final ChunkInfo ci = getChunkInfo(firstKey, metadata); if (ci != null) { final List<String> ciKeys = new ArrayList<>(); for (int i = 1; i < ci.getChunks(); i++) { final String prefix = (i < 10) ? "0" : ""; final String _key = ci.getKey() + "_" + prefix + i; allKeys.add(_key); ciKeys.add(_key); } final byte[] data = new byte[(ci.getChunks() - 2) * ci.getChunkSize() + ci.getLastChunk()]; responseMap.put(ci, new SimpleEntry<>(ciKeys, data)); } } return evcacheMemcachedClient.asyncGetBulk(allKeys, chunkingTranscoder, null) .getSome(bulkReadTimeout.get(), TimeUnit.MILLISECONDS, false, false, scheduler) .map(dataMap -> { for (Entry<ChunkInfo, SimpleEntry<List<String>, byte[]>> entry : responseMap.entrySet()) { final ChunkInfo ci = entry.getKey(); final SimpleEntry<List<String>, byte[]> pair = entry.getValue(); final List<String> ciKeys = pair.getKey(); byte[] data = pair.getValue(); int index = 0; for (int i = 0; i < ciKeys.size(); i++) { final String _key = ciKeys.get(i); final CachedData cd = dataMap.get(_key); if (log.isDebugEnabled()) log.debug("Chunk Key " + _key + "; Value : " + cd); if (cd == null) continue; final byte[] val = cd.getData(); if (val == null) { data = null; break; } final int len = (i == ciKeys.size() - 1) ? ((ci.getLastChunk() == 0 || ci.getLastChunk() > ci .getChunkSize()) ? 
ci.getChunkSize() : ci.getLastChunk()) : val.length; try { System.arraycopy(val, 0, data, index, len); } catch (Exception e) { StringBuilder sb = new StringBuilder(); sb.append("ArrayCopyError - Key : " + _key + "; final data Size : " + data.length + "; copy array size : " + len + "; val size : " + val.length + "; key index : " + i + "; copy from : " + index + "; ChunkInfo : " + ci + "\n"); for (int j = 0; j < ciKeys.size(); j++) { final String skey = ciKeys.get(j); final byte[] sval = dataMap.get(skey).getData(); sb.append(skey + "=" + sval.length + "\n"); } if (log.isWarnEnabled()) log.warn(sb.toString(), e); throw e; } index += val.length; } final boolean checksumPass = checkCRCChecksum(data, ci, hasZF); if (data != null && checksumPass) { final CachedData cd = new CachedData(ci.getFlags(), data, Integer.MAX_VALUE); returnMap.put(ci.getKey(), tc.decode(cd)); } else { returnMap.put(ci.getKey(), null); } } return returnMap; }); }); } private CachedData[] createChunks(CachedData cd, String key) { final int cSize = chunkSize.get(); if ((key.length() + 3) > cSize) throw new IllegalArgumentException("The chunksize " + cSize + " is smaller than the key size. Will not be able to proceed. key size = " + key.length()); final int len = cd.getData().length; /* the format of headers in memcached */ // Key size + 1 + Header( Flags (Characters Number) + Key (Characters Numbers) + 2 bytes ( \r\n ) + 4 bytes (2 spaces and 1 \r)) + Chunk Size + CAS Size // final int overheadSize = key.length() // Key Size // + 1 // Space // + 4 // Flags (Characters Number) // + 4 // Key (Characters Numbers) // + 2 // /r/n // + 4 // 2 spaces and 1 \r // + 48 // Header Size // + 8; // CAS final int overheadSize = key.length() + 71 + 3; // 3 because we will suffix _00, _01 ... _99; 68 is the size of the memcached header final int actualChunkSize = cSize - overheadSize; int lastChunkSize = len % actualChunkSize; final int numOfChunks = len / actualChunkSize + ((lastChunkSize > 0) ? 
1 : 0) + 1; final CachedData[] chunkData = new CachedData[numOfChunks]; if (lastChunkSize == 0) lastChunkSize = actualChunkSize; final long sTime = System.nanoTime(); final Checksum checksum = new CRC32(); checksum.update(cd.getData(), 0, len); final long checkSumValue = checksum.getValue(); int srcPos = 0; if (log.isDebugEnabled()) log.debug("Ths size of data is " + len + " ; we will create " + (numOfChunks - 1) + " of " + actualChunkSize + " bytes. Checksum : " + checkSumValue + "; Checksum Duration : " + (System.nanoTime() - sTime)); chunkData[0] = decodingTranscoder.encode(numOfChunks + ":" + actualChunkSize + ":" + lastChunkSize + ":" + cd .getFlags() + ":" + checkSumValue); for (int i = 1; i < numOfChunks; i++) { int lengthOfArray = actualChunkSize; if (srcPos + actualChunkSize > len) { lengthOfArray = len - srcPos; } byte[] dest = new byte[actualChunkSize]; System.arraycopy(cd.getData(), srcPos, dest, 0, lengthOfArray); if (actualChunkSize > lengthOfArray) { for (int j = lengthOfArray; j < actualChunkSize; j++) { dest[j] = Character.UNASSIGNED;// Adding filler data } } srcPos += lengthOfArray; //chunkData[i] = decodingTranscoder.encode(dest); chunkData[i] = new CachedData(SPECIAL_BYTEARRAY, dest, Integer.MAX_VALUE); } EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.INTERNAL_NUM_CHUNK_SIZE, getTagList()).record(numOfChunks); EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.INTERNAL_CHUNK_DATA_SIZE, getTagList()).record(len); return chunkData; } /** * Retrieves all the chunks as is. This is mainly used for debugging. * * @param key * @return Returns all the chunks retrieved. 
* @throws EVCacheReadQueueException
     * @throws EVCacheException
     * @throws Exception
     */
    public Map<String, CachedData> getAllChunks(String key) throws EVCacheReadQueueException, EVCacheException, Exception {
        try {
            final ChunkDetails<Object> cd = getChunkDetails(key);
            if(log.isDebugEnabled()) log.debug("Chunkdetails " + cd);
            if (cd == null) return null;
            if (!cd.isChunked()) {
                // Unchunked value: return it under the original key as-is.
                Map<String, CachedData> rv = new HashMap<String, CachedData>();
                rv.put(key, (CachedData) cd.getData());
                if(log.isDebugEnabled()) log.debug("Data : " + rv);
                return rv;
            } else {
                // Chunked value: bulk-fetch all raw chunks without reassembling them.
                final List<String> keys = cd.getChunkKeys();
                if(log.isDebugEnabled()) log.debug("Keys - " + keys);
                final Map<String, CachedData> dataMap = evcacheMemcachedClient.asyncGetBulk(keys, chunkingTranscoder, null)
                        .getSome(readTimeout.get().intValue(), TimeUnit.MILLISECONDS, false, false);
                if(log.isDebugEnabled()) log.debug("Datamap " + dataMap);
                return dataMap;
            }
        } catch (Exception e) {
            // Best-effort debugging aid: swallow and return null rather than propagate.
            log.error(e.getMessage(), e);
        }
        return null;
    }

    /** Atomically increments the counter at key by the given delta (delegates to memcached incr). */
    public long incr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException {
        return evcacheMemcachedClient.incr(key, by, defaultVal, timeToLive);
    }

    /** Atomically decrements the counter at key by the given delta (delegates to memcached decr). */
    public long decr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException {
        return evcacheMemcachedClient.decr(key, by, defaultVal, timeToLive);
    }

    /**
     * Asynchronously fetches key and decodes it with the given transcoder, completing
     * within the configured read timeout. Does not handle chunked values.
     */
    public <T> CompletableFuture<T> getAsync(String key, Transcoder<T> tc) {
        if(log.isDebugEnabled()) log.debug("fetching data getAsync {}", key);
        return evcacheMemcachedClient
                .asyncGet(key, tc, null)
                .getAsync(readTimeout.get(), TimeUnit.MILLISECONDS);
    }

    /**
     * Fetches key, reassembling from chunks when chunked is true, otherwise via a
     * plain get bounded by the read timeout.
     */
    public <T> T get(String key, Transcoder<T> tc, boolean _throwException, boolean hasZF, boolean chunked) throws Exception {
        if (chunked) {
            return assembleChunks(key, false, 0, tc, hasZF);
        } else {
            return evcacheMemcachedClient.asyncGet(key, tc, null).get(readTimeout.get(), TimeUnit.MILLISECONDS,
                    _throwException, hasZF);
        }
    }

    public <T> T get(String key, Transcoder<T> tc, boolean _throwException, boolean hasZF) throws
Exception { if (!validateNode(key, _throwException, Call.GET)) { if(ignoreInactiveNodes.get()) { incrementFailure(EVCacheMetricsFactory.IGNORE_INACTIVE_NODES, Call.GET); return pool.getEVCacheClientForReadExclude(serverGroup).get(key, tc, _throwException, hasZF, enableChunking.get()); } else { return null; } } return get(key, tc, _throwException, hasZF, enableChunking.get()); } public <T> Single<T> get(String key, Transcoder<T> tc, boolean _throwException, boolean hasZF, boolean chunked, Scheduler scheduler) throws Exception { if (chunked) { return assembleChunks(key, _throwException, 0, tc, hasZF, scheduler); } else { return evcacheMemcachedClient.asyncGet(key, tc, null) .get(readTimeout.get(), TimeUnit.MILLISECONDS, _throwException, hasZF, scheduler); } } public <T> Single<T> get(String key, Transcoder<T> tc, boolean _throwException, boolean hasZF, Scheduler scheduler) { try { if (!validateNode(key, _throwException, Call.GET)) { if(ignoreInactiveNodes.get()) { incrementFailure(EVCacheMetricsFactory.IGNORE_INACTIVE_NODES, Call.GET); return pool.getEVCacheClientForReadExclude(serverGroup).get(key, tc, _throwException, hasZF, enableChunking.get(), scheduler); } else { return Single.just(null); } } return get(key, tc, _throwException, hasZF, enableChunking.get(), scheduler); } catch (Throwable e) { return Single.error(e); } } public <T> T getAndTouch(String key, Transcoder<T> tc, int timeToLive, boolean _throwException, boolean hasZF) throws Exception { EVCacheMemcachedClient _client = evcacheMemcachedClient; if (!validateNode(key, _throwException, Call.GET_AND_TOUCH)) { if(ignoreInactiveNodes.get()) { incrementFailure(EVCacheMetricsFactory.IGNORE_INACTIVE_NODES, Call.GET_AND_TOUCH); _client = pool.getEVCacheClientForReadExclude(serverGroup).getEVCacheMemcachedClient(); } else { return null; } } if (tc == null) tc = (Transcoder<T>) getTranscoder(); final T returnVal; if (enableChunking.get()) { return assembleChunks(key, false, 0, tc, hasZF); } else { 
if(ignoreTouch.get()) { returnVal = _client.asyncGet(key, tc, null).get(readTimeout.get(), TimeUnit.MILLISECONDS, _throwException, hasZF); } else { final CASValue<T> value = _client.asyncGetAndTouch(key, timeToLive, tc).get(readTimeout.get(), TimeUnit.MILLISECONDS, _throwException, hasZF); returnVal = (value == null) ? null : value.getValue(); } } return returnVal; } public <T> Single<T> getAndTouch(String key, Transcoder<T> transcoder, int timeToLive, boolean _throwException, boolean hasZF, Scheduler scheduler) { try { EVCacheMemcachedClient client = evcacheMemcachedClient; if (!validateNode(key, _throwException, Call.GET_AND_TOUCH)) { if(ignoreInactiveNodes.get()) { incrementFailure(EVCacheMetricsFactory.IGNORE_INACTIVE_NODES, Call.GET_AND_TOUCH); client = pool.getEVCacheClientForReadExclude(serverGroup).getEVCacheMemcachedClient(); } else { return null; } } final EVCacheMemcachedClient _client = client; final Transcoder<T> tc = (transcoder == null) ? (Transcoder<T>) getTranscoder(): transcoder; if (enableChunking.get()) { return assembleChunks(key, false, 0, tc, hasZF, scheduler); } else { return _client.asyncGetAndTouch(key, timeToLive, tc) .get(readTimeout.get(), TimeUnit.MILLISECONDS, _throwException, hasZF, scheduler) .map(value -> (value == null) ? 
null : value.getValue()); } } catch (Throwable e) { return Single.error(e); } } public <T> Map<String, T> getBulk(Collection<String> _canonicalKeys, Transcoder<T> tc, boolean _throwException, boolean hasZF) throws Exception { final Collection<String> canonicalKeys = validateReadQueueSize(_canonicalKeys, Call.BULK); final Map<String, T> returnVal; try { if (tc == null) tc = (Transcoder<T>) getTranscoder(); if (enableChunking.get()) { returnVal = assembleChunks(_canonicalKeys, tc, hasZF); } else { returnVal = evcacheMemcachedClient.asyncGetBulk(canonicalKeys, tc, null) .getSome(bulkReadTimeout.get(), TimeUnit.MILLISECONDS, _throwException, hasZF); } } catch (Exception e) { if (_throwException) throw e; return Collections.<String, T> emptyMap(); } return returnVal; } public <T> CompletableFuture<Map<String, T>> getAsyncBulk(Collection<String> _canonicalKeys, Transcoder<T> tc) { final Collection<String> canonicalKeys = validateReadQueueSize(_canonicalKeys, Call.COMPLETABLE_FUTURE_GET_BULK); if (tc == null) tc = (Transcoder<T>) getTranscoder(); return evcacheMemcachedClient .asyncGetBulk(canonicalKeys, tc, null) .getAsyncSome(bulkReadTimeout.get(), TimeUnit.MILLISECONDS); } public <T> Single<Map<String, T>> getBulk(Collection<String> _canonicalKeys, final Transcoder<T> transcoder, boolean _throwException, boolean hasZF, Scheduler scheduler) { try { final Collection<String> canonicalKeys = validateReadQueueSize(_canonicalKeys, Call.BULK); final Transcoder<T> tc = (transcoder == null) ? 
(Transcoder<T>) getTranscoder() : transcoder; if (enableChunking.get()) { return assembleChunks(_canonicalKeys, tc, hasZF, scheduler); } else { return evcacheMemcachedClient.asyncGetBulk(canonicalKeys, tc, null) .getSome(bulkReadTimeout.get(), TimeUnit.MILLISECONDS, _throwException, hasZF, scheduler); } } catch (Throwable e) { return Single.error(e); } } public <T> Future<Boolean> append(String key, T value) throws Exception { if (enableChunking.get()) throw new EVCacheException( "This operation is not supported as chunking is enabled on this EVCacheClient."); final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key); if (!ensureWriteQueueSize(node, key, Call.APPEND)) return getDefaultFuture(); return evcacheMemcachedClient.append(key, value); } public Future<Boolean> set(String key, CachedData value, int timeToLive) throws Exception { return _set(key, value, timeToLive, null); } public Future<Boolean> set(String key, CachedData cd, int timeToLive, EVCacheLatch evcacheLatch) throws Exception { return _set(key, cd, timeToLive, evcacheLatch); } @Deprecated public <T> Future<Boolean> set(String key, T value, int timeToLive) throws Exception { return set(key, value, timeToLive, null); } @Deprecated public <T> Future<Boolean> set(String key, T value, int timeToLive, EVCacheLatch evcacheLatch) throws Exception { final CachedData cd; if (value instanceof CachedData) { cd = (CachedData) value; } else { cd = getTranscoder().encode(value); } return _set(key, cd, timeToLive, evcacheLatch); } private Future<Boolean> _set(String key, CachedData value, int timeToLive, EVCacheLatch evcacheLatch) throws Exception { final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key); if (!ensureWriteQueueSize(node, key, Call.SET)) { if (log.isInfoEnabled()) log.info("Node : " + node + " is not active. 
Failing fast and dropping the write event."); final ListenableFuture<Boolean, OperationCompletionListener> defaultFuture = (ListenableFuture<Boolean, OperationCompletionListener>) getDefaultFuture(); if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(defaultFuture); return defaultFuture; } try { final int dataSize = ((CachedData) value).getData().length; if (enableChunking.get()) { if (dataSize > chunkSize.get()) { final CachedData[] cd = createChunks(value, key); final int len = cd.length; final OperationFuture<Boolean>[] futures = new OperationFuture[len]; for (int i = 0; i < cd.length; i++) { final String prefix = (i < 10) ? "0" : ""; futures[i] = evcacheMemcachedClient.set(key + "_" + prefix + i, timeToLive, cd[i], null, null); } // ensure we are deleting the unchunked key if it exists. // Ignore return value since it may not exist. evcacheMemcachedClient.delete(key); return new EVCacheFutures(futures, key, appName, serverGroup, evcacheLatch); } else { // delete all the chunks if they exist as the // data is moving from chunked to unchunked delete(key); return evcacheMemcachedClient.set(key, timeToLive, value, null, evcacheLatch); } } else { return evcacheMemcachedClient.set(key, timeToLive, value, null, evcacheLatch); } } catch (Exception e) { log.error(e.getMessage(), e); throw e; } } private Boolean shouldHashKey() { return hashKeyByServerGroup.get(); } public HashingAlgorithm getHashingAlgorithm() { if (null == shouldHashKey()) { // hash key property is not set at the client level return null; } // return NO_HASHING if hashing is explicitly disabled at client level return shouldHashKey() ? 
KeyHasher.getHashingAlgorithmFromString(hashingAlgo.get()) : HashingAlgorithm.NO_HASHING; } public <T> Future<Boolean> appendOrAdd(String key, CachedData value, int timeToLive, EVCacheLatch evcacheLatch) throws Exception { final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key); if (!ensureWriteQueueSize(node, key, Call.APPEND_OR_ADD)) { if (log.isInfoEnabled()) log.info("Node : " + node + " is not active. Failing fast and dropping the write event."); final ListenableFuture<Boolean, OperationCompletionListener> defaultFuture = (ListenableFuture<Boolean, OperationCompletionListener>) getDefaultFuture(); if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(defaultFuture); return defaultFuture; } try { return evcacheMemcachedClient.asyncAppendOrAdd(key, timeToLive, value, evcacheLatch); } catch (Exception e) { log.error(e.getMessage(), e); throw e; } } public Future<Boolean> replace(String key, CachedData cd, int timeToLive, EVCacheLatch evcacheLatch) throws Exception { return _replace(key, cd, timeToLive, evcacheLatch); } @Deprecated public <T> Future<Boolean> replace(String key, T value, int timeToLive, EVCacheLatch evcacheLatch) throws Exception { final CachedData cd; if (value instanceof CachedData) { cd = (CachedData) value; } else { cd = getTranscoder().encode(value); } return _replace(key, cd, timeToLive, evcacheLatch); } private Future<Boolean> _replace(String key, CachedData value, int timeToLive, EVCacheLatch evcacheLatch) throws Exception { final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key); if (!ensureWriteQueueSize(node, key, Call.REPLACE)) { if (log.isInfoEnabled()) log.info("Node : " + node + " is not active. 
Failing fast and dropping the replace event."); final ListenableFuture<Boolean, OperationCompletionListener> defaultFuture = (ListenableFuture<Boolean, OperationCompletionListener>) getDefaultFuture(); if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(defaultFuture); return defaultFuture; } try { final int dataSize = ((CachedData) value).getData().length; if (enableChunking.get() && dataSize > chunkSize.get()) { final CachedData[] cd = createChunks(value, key); final int len = cd.length; final OperationFuture<Boolean>[] futures = new OperationFuture[len]; for (int i = 0; i < cd.length; i++) { final String prefix = (i < 10) ? "0" : ""; futures[i] = evcacheMemcachedClient.replace(key + "_" + prefix + i, timeToLive, cd[i], null, null); } return new EVCacheFutures(futures, key, appName, serverGroup, evcacheLatch); } else { return evcacheMemcachedClient.replace(key, timeToLive, value, null, evcacheLatch); } } catch (Exception e) { log.error(e.getMessage(), e); throw e; } } private Future<Boolean> _add(String key, int exp, CachedData value, EVCacheLatch latch) throws Exception { if (enableChunking.get()) throw new EVCacheException("This operation is not supported as chunking is enabled on this EVCacheClient."); final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key); if (!ensureWriteQueueSize(node, key, Call.ADD)) return getDefaultFuture(); return evcacheMemcachedClient.add(key, exp, value, null, latch); } @Deprecated public <T> Future<Boolean> add(String key, int exp, T value) throws Exception { final CachedData cd; if (value instanceof CachedData) { cd = (CachedData) value; } else { cd = getTranscoder().encode(value); } return _add(key, exp, cd, null); } @Deprecated public <T> Future<Boolean> add(String key, int exp, T value, Transcoder<T> tc) throws Exception { final CachedData cd; if (value instanceof CachedData) { cd = (CachedData) value; } else { if(tc == null) { cd = 
getTranscoder().encode(value); } else { cd = tc.encode(value); } } return _add(key, exp, cd, null); } @Deprecated public <T> Future<Boolean> add(String key, int exp, T value, final Transcoder<T> tc, EVCacheLatch latch) throws Exception { final CachedData cd; if (value instanceof CachedData) { cd = (CachedData) value; } else { if(tc == null) { cd = getTranscoder().encode(value); } else { cd = tc.encode(value); } } return _add(key, exp, cd, latch); } public Future<Boolean> add(String key, int exp, CachedData value, EVCacheLatch latch) throws Exception { return _add(key, exp, value, latch); } public <T> Future<Boolean> touch(String key, int timeToLive) throws Exception { return touch(key, timeToLive, null); } public <T> Future<Boolean> touch(String key, int timeToLive, EVCacheLatch latch) throws Exception { if(ignoreTouch.get()) { final ListenableFuture<Boolean, OperationCompletionListener> sf = new SuccessFuture(); if (latch != null && latch instanceof EVCacheLatchImpl && !isInWriteOnly()) ((EVCacheLatchImpl) latch).addFuture(sf); return sf; } final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key); if (!ensureWriteQueueSize(node, key, Call.TOUCH)) { final ListenableFuture<Boolean, OperationCompletionListener> defaultFuture = (ListenableFuture<Boolean, OperationCompletionListener>) getDefaultFuture(); if (latch != null && latch instanceof EVCacheLatchImpl && !isInWriteOnly()) ((EVCacheLatchImpl) latch).addFuture(defaultFuture); return defaultFuture; } if (enableChunking.get()) { final ChunkDetails<?> cd = getChunkDetails(key); if (cd.isChunked()) { final List<String> keys = cd.getChunkKeys(); OperationFuture<Boolean>[] futures = new OperationFuture[keys.size() + 1]; futures[0] = evcacheMemcachedClient.touch(key + "_00", timeToLive, latch); for (int i = 0; i < keys.size(); i++) { final String prefix = (i < 10) ? 
"0" : "";
                // NOTE(review): this loop rebuilds chunk keys from the index i starting at 0
                // (key + "_" + prefix + i), so it touches key_00 again (futures[0] already
                // touched it) and appears to never touch the last chunk key. delete() below
                // iterates keys.get(i) instead — confirm against getChunkKeys() semantics.
                final String _key = key + "_" + prefix + i;
                futures[i + 1] = evcacheMemcachedClient.touch(_key, timeToLive, latch);
            }
            return new EVCacheFutures(futures, key, appName, serverGroup, latch);
            } else {
                return evcacheMemcachedClient.touch(key, timeToLive, latch);
            }
        } else {
            return evcacheMemcachedClient.touch(key, timeToLive, latch);
        }
    }

    /**
     * Asynchronously fetches key without waiting for the result. Not supported when
     * chunking is enabled; returns null when the node fails validation.
     */
    public <T> Future<T> asyncGet(String key, Transcoder<T> tc, boolean _throwException, boolean hasZF) throws Exception {
        if (enableChunking.get()) throw new EVCacheException(
                "This operation is not supported as chunking is enabled on this EVCacheClient.");
        if (!validateNode(key, _throwException, Call.ASYNC_GET)) return null;
        if (tc == null) tc = (Transcoder<T>) getTranscoder();
        return evcacheMemcachedClient.asyncGet(key, tc, null);
    }

    public Future<Boolean> delete(String key) throws Exception {
        return delete(key, null);
    }

    /**
     * Deletes key (and all of its chunks when chunking is enabled). When the node's
     * write queue is saturated a failed default future is returned and, unless the
     * replica is write-only, registered on the latch.
     */
    public Future<Boolean> delete(String key, EVCacheLatch latch) throws Exception {
        final MemcachedNode node = evcacheMemcachedClient.getEVCacheNode(key);
        if (!ensureWriteQueueSize(node, key, Call.DELETE)) {
            final ListenableFuture<Boolean, OperationCompletionListener> defaultFuture = (ListenableFuture<Boolean, OperationCompletionListener>) getDefaultFuture();
            if (latch != null && latch instanceof EVCacheLatchImpl && !isInWriteOnly()) ((EVCacheLatchImpl) latch).addFuture(defaultFuture);
            return defaultFuture;
        }
        if (enableChunking.get()) {
            final ChunkDetails<?> cd = getChunkDetails(key);
            if (cd == null) {
                // Paranoid delete : cases where get fails and we ensure the first key is deleted just in case
                return evcacheMemcachedClient.delete(key + "_00", latch);
            }
            if (!cd.isChunked()) {
                return evcacheMemcachedClient.delete(key, latch);
            } else {
                // Delete the metadata chunk plus every data chunk.
                final List<String> keys = cd.getChunkKeys();
                OperationFuture<Boolean>[] futures = new OperationFuture[keys.size() + 1];
                futures[0] = evcacheMemcachedClient.delete(key + "_00");
                for (int i = 0; i < keys.size(); i++) {
                    futures[i + 1] = evcacheMemcachedClient.delete(keys.get(i), null);
                }
return new EVCacheFutures(futures, key, appName, serverGroup, latch); } } else { return evcacheMemcachedClient.delete(key, latch); } } public boolean removeConnectionObserver() { try { boolean removed = evcacheMemcachedClient.removeObserver(connectionObserver); if (removed) connectionObserver = null; return removed; } catch (Exception e) { return false; } } public boolean shutdown(long timeout, TimeUnit unit) { if(shutdown) return true; shutdown = true; try { evcacheMemcachedClient.shutdown(timeout, unit); } catch(Throwable t) { log.error("Exception while shutting down", t); } return true; } public EVCacheConnectionObserver getConnectionObserver() { return this.connectionObserver; } public ConnectionFactory getConnectionFactory() { return connectionFactory; } public String getAppName() { return appName; } public String getZone() { return zone; } public int getId() { return id; } public ServerGroup getServerGroup() { return serverGroup; } public String getServerGroupName() { return (serverGroup == null ? 
"NA" : serverGroup.getName()); } public boolean isShutdown() { return this.shutdown; } public boolean isInWriteOnly(){ return pool.isInWriteOnly(getServerGroup()); } public Map<SocketAddress, Map<String, String>> getStats(String cmd) { return evcacheMemcachedClient.getStats(cmd); } public Map<SocketAddress, String> execCmd(String cmd, String[] ips) { return evcacheMemcachedClient.execCmd(cmd, ips); } public Map<SocketAddress, String> getVersions() { return evcacheMemcachedClient.getVersions(); } public Future<Boolean> flush() { return evcacheMemcachedClient.flush(); } public Transcoder<Object> getTranscoder() { return evcacheMemcachedClient.getTranscoder(); } public ConnectionFactory getEVCacheConnectionFactory() { return this.connectionFactory; } public NodeLocator getNodeLocator() { return this.evcacheMemcachedClient.getNodeLocator(); } static class SuccessFuture implements ListenableFuture<Boolean, OperationCompletionListener> { @Override public boolean cancel(boolean mayInterruptIfRunning) { return true; } @Override public boolean isCancelled() { return false; } @Override public boolean isDone() { return true; } @Override public Boolean get() throws InterruptedException, ExecutionException { return Boolean.TRUE; } @Override public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return Boolean.TRUE; } @Override public Future<Boolean> addListener(OperationCompletionListener listener) { return this; } @Override public Future<Boolean> removeListener(OperationCompletionListener listener) { return this; } } static class DefaultFuture implements ListenableFuture<Boolean, OperationCompletionListener> { public boolean cancel(boolean mayInterruptIfRunning) { return false; } @Override public boolean isCancelled() { return false; } @Override public boolean isDone() { return true; } @Override public Boolean get() throws InterruptedException, ExecutionException { return Boolean.FALSE; } @Override public Boolean 
get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return Boolean.FALSE; } @Override public Future<Boolean> addListener(OperationCompletionListener listener) { return this; } @Override public Future<Boolean> removeListener(OperationCompletionListener listener) { return this; } } private Future<Boolean> getDefaultFuture() { final Future<Boolean> defaultFuture = new DefaultFuture(); return defaultFuture; } public String toString() { return "App : " + appName + "; Zone : " + zone + "; Id : " + id + "; " + serverGroup.toString() + "; Nodes : " + memcachedNodesInZone.toString(); } public EVCacheMemcachedClient getEVCacheMemcachedClient() { return evcacheMemcachedClient; } public List<InetSocketAddress> getMemcachedNodesInZone() { return memcachedNodesInZone; } public int getMaxWriteQueueSize() { return maxWriteQueueSize; } public Property<Integer> getReadTimeout() { return readTimeout; } public Property<Integer> getBulkReadTimeout() { return bulkReadTimeout; } public Property<Integer> getMaxReadQueueSize() { return maxReadQueueSize; } public Property<Boolean> getEnableChunking() { return enableChunking; } public Property<Integer> getChunkSize() { return chunkSize; } public ChunkTranscoder getChunkingTranscoder() { return chunkingTranscoder; } public EVCacheSerializingTranscoder getDecodingTranscoder() { return decodingTranscoder; } public EVCacheClientPool getPool() { return pool; } public EVCacheServerGroupConfig getEVCacheConfig() { return config; } static class ChunkDetails<T> { final List<String> chunkKeys; final ChunkInfo chunkInfo; final boolean chunked; final T data; public ChunkDetails(List<String> chunkKeys, ChunkInfo chunkInfo, boolean chunked, T data) { super(); this.chunkKeys = chunkKeys; this.chunkInfo = chunkInfo; this.chunked = chunked; this.data = data; } public List<String> getChunkKeys() { return chunkKeys; } public ChunkInfo getChunkInfo() { return chunkInfo; } public boolean isChunked() { return 
chunked; } public T getData() { return data; } @Override public String toString() { return "ChunkDetails [chunkKeys=" + chunkKeys + ", chunkInfo=" + chunkInfo + ", chunked=" + chunked + ", data=" + data + "]"; } } static class ChunkInfo { final int chunks; final int chunkSize; final int lastChunk; final int flags; final String key; final long checksum; public ChunkInfo(int chunks, int chunkSize, int lastChunk, int flags, String firstKey, long checksum) { super(); this.chunks = chunks; this.chunkSize = chunkSize; this.lastChunk = lastChunk; this.flags = flags; this.key = firstKey; this.checksum = checksum; } public int getChunks() { return chunks; } public int getChunkSize() { return chunkSize; } public int getLastChunk() { return lastChunk; } public int getFlags() { return flags; } public String getKey() { return key; } public long getChecksum() { return checksum; } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{\"chunks\":\""); builder.append(chunks); builder.append("\",\"chunkSize\":\""); builder.append(chunkSize); builder.append("\",\"lastChunk\":\""); builder.append(lastChunk); builder.append("\",\"flags\":\""); builder.append(flags); builder.append("\",\"key\":\""); builder.append(key); builder.append("\",\"checksum\":\""); builder.append(checksum); builder.append("\"}"); return builder.toString(); } } public int getWriteQueueLength() { final Collection<MemcachedNode> allNodes = evcacheMemcachedClient.getNodeLocator().getAll(); int size = 0; for(MemcachedNode node : allNodes) { if(node instanceof EVCacheNode) { size += ((EVCacheNode)node).getWriteQueueSize(); } } return size; } public int getReadQueueLength() { final Collection<MemcachedNode> allNodes = evcacheMemcachedClient.getNodeLocator().getAll(); int size = 0; for(MemcachedNode node : allNodes) { if(node instanceof EVCacheNode) { size += ((EVCacheNode)node).getReadQueueSize(); } } return size; } public List<Tag> getTagList() { return tags; } public 
Counter getOperationCounter() { return operationsCounter; } /** * Return the keys upto the limit. The key will be cannoicalized key( or hashed Key).<br> * <B> The keys are read into memory so make sure you have enough memory to read the specified number of keys<b> * @param limit - The number of keys that need to fetched from each memcached clients. * @return - the List of keys. */ public List<String> getAllKeys(final int limit) { final List<String> keyList = new ArrayList<String>(limit); byte[] array = new byte[EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".all.keys.reader.buffer.size.bytes", Integer.class).orElse(4*1024*1024).get()]; final int waitInSec = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".all.keys.reader.wait.duration.sec", Integer.class).orElse(60).get(); for(InetSocketAddress address : memcachedNodesInZone) { //final List<String> keyList = new ArrayList<String>(limit); Socket socket = null; PrintWriter printWriter = null; BufferedInputStream bufferedReader = null; try { socket = new Socket(address.getHostName(), address.getPort()); printWriter = new PrintWriter(socket.getOutputStream(), true); printWriter.print("lru_crawler metadump all \r\n"); printWriter.print("quit \r\n"); printWriter.flush(); bufferedReader = new BufferedInputStream(socket.getInputStream()); while(isDataAvailableForRead(bufferedReader, waitInSec, TimeUnit.SECONDS, socket)) { int read = bufferedReader.read(array); if (log.isDebugEnabled()) log.debug("Number of bytes read = " +read); if(read > 0) { StringBuilder b = new StringBuilder(); boolean start = true; for (int i = 0; i < read; i++) { if(array[i] == ' ') { start = false; if(b.length() > 4) keyList.add(URLDecoder.decode(b.substring(4), StandardCharsets.UTF_8.name())); b = new StringBuilder(); } if(start) b.append((char)array[i]); if(array[i] == '\n') { start = true; } if(keyList.size() >= limit) { if (log.isDebugEnabled()) log.debug("Record Limit reached. 
Will break and return"); return keyList; } } } else if (read < 0 ){ break; } } } catch (Exception e) { if(socket != null) { try { socket.close(); } catch (IOException e1) { log.error("Error closing socket", e1); } } log.error("Exception", e); } finally { if(bufferedReader != null) { try { bufferedReader.close(); } catch (IOException e1) { log.error("Error closing bufferedReader", e1); } } if(printWriter != null) { try { printWriter.close(); } catch (Exception e1) { log.error("Error closing socket", e1); } } if(socket != null) { try { socket.close(); } catch (IOException e) { if (log.isDebugEnabled()) log.debug("Error closing socket", e); } } } } return keyList; } private boolean isDataAvailableForRead(BufferedInputStream bufferedReader, long timeout, TimeUnit unit, Socket socket) throws IOException { long expiry = System.currentTimeMillis() + unit.toMillis(timeout); int tryCount = 0; while(expiry > System.currentTimeMillis()) { if(log.isDebugEnabled()) log.debug("For Socket " + socket + " number of bytes available = " + bufferedReader.available() + " and try number is " + tryCount); if(bufferedReader.available() > 0) { return true; } if(tryCount++ < 5) { try { if(log.isDebugEnabled()) log.debug("Sleep for 100 msec"); Thread.sleep(100); } catch (InterruptedException e) { } } else { return false; } } return false; } public EVCacheItemMetaData metaDebug(String key) throws Exception { final EVCacheItemMetaData obj = evcacheMemcachedClient.metaDebug(key); if(log.isDebugEnabled()) log.debug("EVCacheItemMetaData : " + obj); return obj; } public <T> EVCacheItem<T> metaGet(String key, Transcoder<T> tc, boolean _throwException, boolean hasZF) throws Exception { final EVCacheItem<T> obj = evcacheMemcachedClient.asyncMetaGet(key, tc, null).get(readTimeout.get(), TimeUnit.MILLISECONDS, _throwException, hasZF); if (log.isDebugEnabled()) log.debug("EVCacheItem : " + obj); return obj; } public void addTag(String tagName, String tagValue) { final Tag tag = new BasicTag(tagName, 
tagValue); if(tags.contains(tag)) return; final List<Tag> tagList = new ArrayList<Tag>(tags); tagList.add(tag); this.tags = Collections.<Tag>unmodifiableList(new ArrayList(tagList)); } }
4,054
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/ServerGroup.java
package com.netflix.evcache.pool; public class ServerGroup implements Comparable<ServerGroup> { private final String zone; private final String name; public ServerGroup(String zone, String name) { super(); this.zone = zone; this.name = name; } public String getZone() { return zone; } public String getName() { return name; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((name == null) ? 0 : name.hashCode()); result = prime * result + ((zone == null) ? 0 : zone.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (!(obj instanceof ServerGroup)) return false; ServerGroup other = (ServerGroup) obj; if (name == null) { if (other.name != null) return false; } else if (!name.equals(other.name)) return false; if (zone == null) { if (other.zone != null) return false; } else if (!zone.equals(other.zone)) return false; return true; } @Override public String toString() { return "Server Group [zone=" + zone + (name.equals(zone) ? "" : ", name=" + name) + "]"; } @Override public int compareTo(ServerGroup o) { return toString().compareTo(o.toString()); } }
4,055
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheServerGroupConfig.java
package com.netflix.evcache.pool;

import java.net.InetSocketAddress;
import java.util.Set;

/**
 * Pairs a {@link ServerGroup} with the set of memcached socket addresses
 * discovered for it. The address set is the very collection handed to the
 * constructor (no defensive copy), so callers may keep populating it after
 * construction — the node-list providers rely on this.
 */
public class EVCacheServerGroupConfig {

    private final ServerGroup serverGroup;
    private final Set<InetSocketAddress> inetSocketAddress;

    public EVCacheServerGroupConfig(ServerGroup serverGroup, Set<InetSocketAddress> inetSocketAddress) {
        this.serverGroup = serverGroup;
        this.inetSocketAddress = inetSocketAddress;
    }

    /** Replica set this configuration describes. */
    public ServerGroup getServerGroup() {
        return serverGroup;
    }

    /** Live, mutable set of member addresses (not a copy). */
    public Set<InetSocketAddress> getInetSocketAddress() {
        return inetSocketAddress;
    }

    @Override
    public String toString() {
        // NOTE(review): the label reads "EVCacheInstanceConfig", which does not
        // match the class name; kept byte-for-byte so existing log output and
        // any parsers of it are unaffected.
        return "EVCacheInstanceConfig [InetSocketAddress=" + inetSocketAddress + "]";
    }
}
4,056
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientPoolMBean.java
package com.netflix.evcache.pool;

import java.util.Map;

/**
 * JMX management interface for an EVCache client pool: read-only views of the
 * pool topology (instances, zones, server groups, read/write splits) plus an
 * operation to trigger a pool refresh. Semantics of each attribute are defined
 * by the implementing pool class.
 */
public interface EVCacheClientPoolMBean {

    /** Number of instances currently known to the pool. */
    int getInstanceCount();

    /** Instance descriptions keyed by availability zone. */
    Map<String, String> getInstancesByZone();

    /** Instance counts keyed by availability zone. */
    Map<String, Integer> getInstanceCountByZone();

    /** Zones the pool currently reads from. */
    Map<String, String> getReadZones();

    /** Read-instance counts keyed by availability zone. */
    Map<String, Integer> getReadInstanceCountByZone();

    /** Zones the pool currently writes to. */
    Map<String, String> getWriteZones();

    /** Write-instance counts keyed by availability zone. */
    Map<String, Integer> getWriteInstanceCountByZone();

    /** The server group used as a fallback, as a string. */
    String getFallbackServerGroup();

    /** Read server group keyed by availability zone. */
    Map<String, String> getReadServerGroupByZone();

    /** String form of the circular (round-robin) iterator over local server groups. */
    String getLocalServerGroupCircularIterator();

    /** Triggers a refresh of the pool's node list and clients. */
    void refreshPool();

    /** Human-readable dump of the pool's current state. */
    String getPoolDetails();

    /** String form of the circular (round-robin) iterator over write clients. */
    String getEVCacheWriteClientsCircularIterator();
}
4,057
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/SimpleNodeListProvider.java
package com.netflix.evcache.pool;

import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.TimeUnit;

import com.netflix.archaius.api.PropertyRepository;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONTokener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.net.InetAddresses;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.Tag;
import com.netflix.evcache.pool.EVCacheClientPool;

/**
 * {@link EVCacheNodeList} implementation that discovers memcached nodes either
 * from a {@code <APP>-NODES} property (explicit host:port lists) or, failing
 * that, from a Eureka read-only REST endpoint (Netflix-internal).
 */
public class SimpleNodeListProvider implements EVCacheNodeList {

    // NOTE(review): logger is keyed on EVCacheClientPool.class, not this class —
    // looks like a copy-paste; left unchanged so existing log configurations
    // keep matching these messages.
    private static final Logger log = LoggerFactory.getLogger(EVCacheClientPool.class);

    private static final String EUREKA_TIMEOUT = "evcache.eureka.timeout";

    // Last node-list string successfully parsed from the system property.
    private String currentNodeList = "";
    // Socket/connect timeout (millis) for the Eureka HTTP call.
    private final int timeout;
    private String region = null;
    private String env = null;

    /**
     * Reads the HTTP timeout, environment and region from system properties /
     * environment variables. Eureka bootstrap is only attempted when both
     * environment and region could be resolved.
     */
    public SimpleNodeListProvider() {
        final String timeoutStr = System.getProperty(EUREKA_TIMEOUT);
        this.timeout = (timeoutStr != null) ? Integer.parseInt(timeoutStr) : 5000;

        final String sysEnv = System.getenv("NETFLIX_ENVIRONMENT");
        if (sysEnv != null) {
            env = sysEnv;
        } else {
            // Fall through the known property aliases in priority order.
            String propEnv = System.getProperty("@environment");
            if (propEnv == null) propEnv = System.getProperty("eureka.environment");
            if (propEnv == null) propEnv = System.getProperty("netflix.environment");
            env = propEnv;
        }

        final String sysRegion = System.getenv("EC2_REGION");
        if (sysRegion != null) {
            region = sysRegion;
        } else {
            String propRegion = System.getProperty("@region");
            if (propRegion == null) propRegion = System.getProperty("eureka.region");
            if (propRegion == null) propRegion = System.getProperty("netflix.region");
            region = propRegion;
        }
    }

    /**
     * Pass a System Property of format
     *
     * <EVCACHE_APP>-NODES=setname0=instance01:port,instance02:port,
     * instance03:port;setname1=instance11:port,instance12:port,instance13:port;
     * setname2=instance21:port,instance22:port,instance23:port
     *
     * @param appName the EVCache app to discover nodes for
     * @return server-group to config map; empty when nothing can be resolved
     */
    @Override
    public Map<ServerGroup, EVCacheServerGroupConfig> discoverInstances(String appName) throws IOException {
        final String propertyName = appName + "-NODES";
        final String nodeListString = EVCacheConfig.getInstance().getPropertyRepository()
                .get(propertyName, String.class).orElse("").get();
        if (log.isDebugEnabled()) log.debug("List of Nodes = " + nodeListString);
        if (nodeListString != null && nodeListString.length() > 0) return bootstrapFromSystemProperty(nodeListString);
        if (env != null && region != null) return bootstrapFromEureka(appName);
        return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
    }

    /**
     * Netflix specific impl so we can load from eureka.
     *
     * <p>Queries the read-only discovery endpoint for {@code appName}, groups
     * the returned instances by ASG into {@link ServerGroup}s, and resolves
     * each instance's address/port. Any failure is logged at debug and an
     * empty map is returned (best-effort by design).
     *
     * @param appName the application to look up in Eureka
     * @return server-group to config map; empty on any failure
     */
    private Map<ServerGroup, EVCacheServerGroupConfig> bootstrapFromEureka(String appName) throws IOException {
        if (env == null || region == null) return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
        final String url = "http://discoveryreadonly." + region + ".dyn" + env + ".netflix.net:7001/v2/apps/" + appName;
        final CloseableHttpClient httpclient = HttpClients.createDefault();
        final long start = System.currentTimeMillis();
        PropertyRepository props = EVCacheConfig.getInstance().getPropertyRepository();
        CloseableHttpResponse httpResponse = null;
        try {
            final RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout).setConnectTimeout(timeout).build();
            HttpGet httpGet = new HttpGet(url);
            httpGet.addHeader("Accept", "application/json");
            httpGet.setConfig(requestConfig);
            httpResponse = httpclient.execute(httpGet);
            final int statusCode = httpResponse.getStatusLine().getStatusCode();
            if (!(statusCode >= 200 && statusCode < 300)) {
                log.error("Status Code : " + statusCode + " for url " + url);
                return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
            }
            final InputStreamReader in = new InputStreamReader(httpResponse.getEntity().getContent(), Charset.defaultCharset());
            final JSONTokener js = new JSONTokener(in);
            final JSONObject jsonObj = new JSONObject(js);
            final JSONObject application = jsonObj.getJSONObject("application");
            final JSONArray instances = application.getJSONArray("instance");
            final Map<ServerGroup, EVCacheServerGroupConfig> serverGroupMap = new HashMap<ServerGroup, EVCacheServerGroupConfig>();
            final int securePort = Integer.parseInt(props.get("evcache.secure.port", String.class)
                    .orElse(EVCacheClientPool.DEFAULT_SECURE_PORT).get());
            for (int i = 0; i < instances.length(); i++) {
                final JSONObject instanceObj = instances.getJSONObject(i);
                final JSONObject metadataObj = instanceObj.getJSONObject("dataCenterInfo").getJSONObject("metadata");
                final String asgName = instanceObj.getString("asgName");
                // Per-ASG enable flag and the secure-port override chain
                // (ASG-level -> app-level -> global).
                final Property<Boolean> asgEnabled = props.get(asgName + ".enabled", Boolean.class).orElse(true);
                final boolean isSecure = props.get(asgName + ".use.secure", Boolean.class)
                        .orElseGet(appName + ".use.secure")
                        .orElseGet("evcache.use.secure")
                        .orElse(false).get();
                if (!asgEnabled.get()) {
                    if (log.isDebugEnabled()) log.debug("ASG " + asgName + " is disabled so ignoring it");
                    continue;
                }
                final String zone = metadataObj.getString("availability-zone");
                final ServerGroup rSet = new ServerGroup(zone, asgName);
                final String localIp = metadataObj.getString("local-ipv4");
                final JSONObject instanceMetadataObj = instanceObj.getJSONObject("metadata");
                final String evcachePortString = instanceMetadataObj.optString("evcache.port", EVCacheClientPool.DEFAULT_PORT);
                final int evcachePort = Integer.parseInt(evcachePortString);
                final int port = isSecure ? securePort : evcachePort;
                EVCacheServerGroupConfig config = serverGroupMap.get(rSet);
                if (config == null) {
                    config = new EVCacheServerGroupConfig(rSet, new HashSet<InetSocketAddress>());
                    serverGroupMap.put(rSet, config);
                }
                // Build the InetAddress from the literal IP to avoid a reverse
                // DNS lookup, keeping the IP string as the host name.
                final InetAddress add = InetAddresses.forString(localIp);
                final InetAddress inetAddress = InetAddress.getByAddress(localIp, add.getAddress());
                final InetSocketAddress address = new InetSocketAddress(inetAddress, port);
                config.getInetSocketAddress().add(address);
            }
            if (log.isDebugEnabled()) log.debug("Returning : " + serverGroupMap);
            return serverGroupMap;
        } catch (Exception e) {
            if (log.isDebugEnabled()) log.debug("URL : " + url + "; Timeout " + timeout, e);
        } finally {
            if (httpResponse != null) {
                try {
                    httpResponse.close();
                } catch (IOException e) {
                    // best-effort close; log instead of silently swallowing
                    if (log.isDebugEnabled()) log.debug("Error closing http response", e);
                }
            }
            // FIX: the HttpClient itself was previously never closed, leaking
            // its connection pool on every bootstrap call.
            try {
                httpclient.close();
            } catch (IOException e) {
                if (log.isDebugEnabled()) log.debug("Error closing http client", e);
            }
            final List<Tag> tagList = new ArrayList<Tag>(2);
            EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, appName);
            if (log.isDebugEnabled()) log.debug("Total Time to execute " + url + " " + (System.currentTimeMillis() - start) + " msec.");
            EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_BOOTSTRAP_EUREKA, tagList,
                    Duration.ofMillis(100)).record(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
        }
        return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
    }

    /**
     * Parses the {@code <APP>-NODES} property format documented on
     * {@link #discoverInstances(String)}. Each {@code setname=...} section
     * becomes a server group whose zone equals its name; hosts may optionally
     * carry an explicit IP as {@code hostname/ip}.
     *
     * @param nodeListString the raw property value (non-empty)
     * @return server-group to config map for the parsed entries
     */
    private Map<ServerGroup, EVCacheServerGroupConfig> bootstrapFromSystemProperty(String nodeListString) throws IOException {
        final Map<ServerGroup, EVCacheServerGroupConfig> instancesSpecific = new HashMap<ServerGroup, EVCacheServerGroupConfig>();
        final StringTokenizer setTokenizer = new StringTokenizer(nodeListString, ";");
        while (setTokenizer.hasMoreTokens()) {
            final String token = setTokenizer.nextToken();
            final StringTokenizer replicaSetTokenizer = new StringTokenizer(token, "=");
            while (replicaSetTokenizer.hasMoreTokens()) {
                final String replicaSetToken = replicaSetTokenizer.nextToken();
                final String instanceToken = replicaSetTokenizer.nextToken();
                final StringTokenizer instanceTokenizer = new StringTokenizer(instanceToken, ",");
                final Set<InetSocketAddress> instanceList = new HashSet<InetSocketAddress>();
                final ServerGroup rSet = new ServerGroup(replicaSetToken, replicaSetToken);
                final EVCacheServerGroupConfig config = new EVCacheServerGroupConfig(rSet, instanceList);
                instancesSpecific.put(rSet, config);
                while (instanceTokenizer.hasMoreTokens()) {
                    final String instance = instanceTokenizer.nextToken();
                    int index = instance.indexOf(':');
                    String host = instance.substring(0, index);
                    String port = instance.substring(index + 1);
                    int ind = host.indexOf('/');
                    if (ind == -1) {
                        // plain hostname (or IP) — resolve via DNS
                        final InetAddress add = InetAddress.getByName(host);
                        instanceList.add(new InetSocketAddress(add, Integer.parseInt(port)));
                    } else {
                        // "hostname/ip" form — use the given IP, no DNS lookup
                        final String hostName = host.substring(0, ind);
                        final String localIp = host.substring(ind + 1);
                        final InetAddress add = InetAddresses.forString(localIp);
                        final InetAddress inetAddress = InetAddress.getByAddress(hostName, add.getAddress());
                        instanceList.add(new InetSocketAddress(inetAddress, Integer.parseInt(port)));
                    }
                }
            }
        }
        currentNodeList = nodeListString;
        if (log.isDebugEnabled()) log.debug("List by Servergroup" + instancesSpecific);
        return instancesSpecific;
    }

    @Override
    public String toString() {
        // FIX: the original appended a stray extra quote before the closing
        // brace, producing malformed output like {"Current Node List":"x""}.
        StringBuilder builder = new StringBuilder();
        builder.append("{\"Current Node List\":\"");
        builder.append(currentNodeList);
        builder.append("\"}");
        return builder.toString();
    }
}
4,058
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheValue.java
package com.netflix.evcache.pool; import java.io.Serializable; import java.util.Arrays; public class EVCacheValue implements Serializable { /** * */ private static final long serialVersionUID = 3182483105524224448L; private final String key; private final byte[] value; private final int flags; private final long ttl; private final long createTime; public EVCacheValue(String key, byte[] value, int flags, long ttl, long createTime) { super(); this.key = key; this.value = value; this.flags = flags; this.ttl = ttl; this.createTime = createTime; } public String getKey() { return key; } public byte[] getValue() { return value; } public int getFlags() { return flags; } public long getTTL() { return ttl; } public long getCreateTimeUTC() { return createTime; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (int) (createTime ^ (createTime >>> 32)); result = prime * result + ((key == null) ? 0 : key.hashCode()); result = prime * result + (int) (ttl ^ (ttl >>> 32)); result = prime * result + (int) (flags); result = prime * result + Arrays.hashCode(value); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; EVCacheValue other = (EVCacheValue) obj; if (createTime != other.createTime) return false; if (key == null) { if (other.key != null) return false; } else if (!key.equals(other.key)) return false; if (flags != other.flags) return false; if (ttl != other.ttl) return false; if (!Arrays.equals(value, other.value)) return false; return true; } @Override public String toString() { return "EVCacheValue [key=" + key + ", value=" + Arrays.toString(value) + ", flags=" + flags + ", ttl=" + ttl + ", createTime=" + createTime + "]"; } }
4,059
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/observer/EVCacheConnectionObserverMBean.java
package com.netflix.evcache.pool.observer;

import java.net.SocketAddress;
import java.util.Set;

/**
 * JMX management interface for {@code EVCacheConnectionObserver}: exposes the
 * currently active/inactive memcached server sockets and lifetime counters of
 * connection-established / connection-lost events.
 */
public interface EVCacheConnectionObserverMBean {

    /** Number of servers with an established connection. */
    int getActiveServerCount();

    /** Socket addresses of servers with an established connection. */
    Set<SocketAddress> getActiveServerNames();

    /** Number of servers whose connection is currently lost. */
    int getInActiveServerCount();

    /** Socket addresses of servers whose connection is currently lost. */
    Set<SocketAddress> getInActiveServerNames();

    /** Total connection-lost events observed since this observer was created. */
    long getLostCount();

    /** Total connection-established events observed since this observer was created. */
    long getConnectCount();
}
4,060
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/observer/EVCacheConnectionObserver.java
package com.netflix.evcache.pool.observer;

import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.pool.EVCacheClient;

import net.spy.memcached.ConnectionObserver;

/**
 * Tracks connection state for a single {@link EVCacheClient}: which memcached
 * server sockets are currently connected ("active") or disconnected
 * ("inactive"), the timestamp of each socket's last transition, and lifetime
 * connect/disconnect counters. Registers itself as a JMX MBean on construction
 * and unregisters via {@link #shutdown()}.
 *
 * <p>NOTE(review): {@link #connectionEstablished} / {@link #connectionLost}
 * are callbacks from spymemcached while JMX reads happen on other threads;
 * the plain {@code long} counters are not synchronized or volatile —
 * presumably acceptable for monitoring purposes, TODO confirm.
 */
public class EVCacheConnectionObserver implements ConnectionObserver, EVCacheConnectionObserverMBean {

    private static final Logger log = LoggerFactory.getLogger(EVCacheConnectionObserver.class);

    private final EVCacheClient client;
    // Lifetime event counters, exposed through the MBean.
    private long lostCount = 0;
    private long connectCount = 0;
    // Concurrent sets of currently connected / disconnected server sockets.
    private final Set<SocketAddress> evCacheActiveSet;
    private final Set<SocketAddress> evCacheInActiveSet;
    // Per-server timestamp (epoch millis) of the most recent connect / disconnect.
    private final Map<InetSocketAddress, Long> evCacheActiveStringSet;
    private final Map<InetSocketAddress, Long> evCacheInActiveStringSet;

    // private final Counter connectCounter, connLostCounter;

    /**
     * Creates the observer for the given client and registers its JMX MBean.
     */
    public EVCacheConnectionObserver(EVCacheClient client) {
        this.client = client;
        this.evCacheActiveSet = Collections.newSetFromMap(new ConcurrentHashMap<SocketAddress, Boolean>());
        this.evCacheInActiveSet = Collections.newSetFromMap(new ConcurrentHashMap<SocketAddress, Boolean>());
        this.evCacheActiveStringSet = new ConcurrentHashMap<InetSocketAddress, Long>();
        this.evCacheInActiveStringSet = new ConcurrentHashMap<InetSocketAddress, Long>();
        // Spectator counter wiring is currently disabled upstream:
        // final ArrayList<Tag> tags = new ArrayList<Tag>(client.getTagList().size() + 3);
        // tags.addAll(client.getTagList());
        // tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.CONNECT ));
        // connectCounter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.CONFIG, tags);
        //
        // tags.clear();
        // tags.addAll(client.getTagList());
        // tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.DISCONNECT ));
        // connLostCounter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.CONFIG, tags);
        setupMonitoring(false);
    }

    /**
     * Callback when a connection to {@code sa} is (re)established: moves the
     * socket from the inactive to the active set, records the timestamp, and
     * bumps the connect counter.
     */
    public void connectionEstablished(SocketAddress sa, int reconnectCount) {
        final String address = sa.toString();
        evCacheActiveSet.add(sa);
        evCacheInActiveSet.remove(sa);
        final InetSocketAddress inetAdd = (InetSocketAddress) sa;
        evCacheActiveStringSet.put(inetAdd, Long.valueOf(System.currentTimeMillis()));
        evCacheInActiveStringSet.remove(inetAdd);
        if (log.isDebugEnabled()) log.debug(client.getAppName() + ":CONNECTION ESTABLISHED : To " + address + " was established after " + reconnectCount + " retries");
        // trace-level: capture the caller's stack for diagnostics
        if (log.isTraceEnabled()) log.trace("Stack", new Exception());
        // connectCounter.increment();
        connectCount++;
    }

    /**
     * Callback when the connection to {@code sa} is lost: moves the socket
     * from the active to the inactive set, records the timestamp, and bumps
     * the lost counter.
     */
    public void connectionLost(SocketAddress sa) {
        final String address = sa.toString();
        evCacheInActiveSet.add(sa);
        evCacheActiveSet.remove(sa);
        final InetSocketAddress inetAdd = (InetSocketAddress) sa;
        evCacheInActiveStringSet.put(inetAdd, Long.valueOf(System.currentTimeMillis()));
        evCacheActiveStringSet.remove(inetAdd);
        if (log.isDebugEnabled()) log.debug(client.getAppName() + ":CONNECTION LOST : To " + address);
        if (log.isTraceEnabled()) log.trace("Stack", new Exception());
        lostCount++;
        // connLostCounter.increment();
    }

    public int getActiveServerCount() {
        return evCacheActiveSet.size();
    }

    public Set<SocketAddress> getActiveServerNames() {
        return evCacheActiveSet;
    }

    public int getInActiveServerCount() {
        return evCacheInActiveSet.size();
    }

    public Set<SocketAddress> getInActiveServerNames() {
        return evCacheInActiveSet;
    }

    public long getLostCount() {
        return lostCount;
    }

    public long getConnectCount() {
        return connectCount;
    }

    /** Inactive servers with the time (epoch millis) each was last lost. */
    public Map<InetSocketAddress, Long> getInActiveServers() {
        return evCacheInActiveStringSet;
    }

    /** Active servers with the time (epoch millis) each last connected. */
    public Map<InetSocketAddress, Long> getActiveServers() {
        return evCacheActiveStringSet;
    }

    /**
     * Registers this observer's MBean (or, when {@code shutdown} is true, only
     * unregisters an existing one). A stale registration under the same name
     * is replaced. Failures are logged and swallowed — monitoring must never
     * break the client.
     */
    private void setupMonitoring(boolean shutdown) {
        try {
            final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=" + client.getAppName()
                    + ",SubGroup=pool,SubSubGroup=" + client.getServerGroupName() + ",SubSubSubGroup=" + client.getId());
            final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            if (mbeanServer.isRegistered(mBeanName)) {
                if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
                mbeanServer.unregisterMBean(mBeanName);
            }
            if (!shutdown) {
                mbeanServer.registerMBean(this, mBeanName);
            }
        } catch (Exception e) {
            if (log.isWarnEnabled()) log.warn(e.getMessage(), e);
        }
    }

    /**
     * Unregisters the per-node MBeans of every currently inactive server.
     * Failures are logged and swallowed.
     */
    private void unRegisterInActiveNodes() {
        try {
            for (SocketAddress sa : evCacheInActiveSet) {
                final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=" + client.getAppName()
                        + ",SubGroup=pool" + ",SubSubGroup=" + client.getServerGroupName()
                        + ",SubSubSubGroup=" + client.getId()
                        + ",SubSubSubSubGroup=" + ((InetSocketAddress) sa).getHostName());
                final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
                if (mbeanServer.isRegistered(mBeanName)) {
                    if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
                    mbeanServer.unregisterMBean(mBeanName);
                }
            }
        } catch (Exception e) {
            if (log.isWarnEnabled()) log.warn(e.getMessage(), e);
        }
    }

    /** Unregisters all JMX beans owned by this observer. */
    public void shutdown() {
        unRegisterInActiveNodes();
        setupMonitoring(true);
    }

    public String toString() {
        return "EVCacheConnectionObserver [" + "EVCacheClient=" + client
                + ", evCacheActiveSet=" + evCacheActiveSet
                + ", evCacheInActiveSet=" + evCacheInActiveSet
                + ", evCacheActiveStringSet=" + evCacheActiveStringSet
                + ", evCacheInActiveStringSet=" + evCacheInActiveStringSet + "]";
    }

    public String getAppName() {
        return client.getAppName();
    }

    public String getServerGroup() {
        return client.getServerGroup().toString();
    }

    public int getId() {
        return client.getId();
    }

    public EVCacheClient getClient() {
        return client;
    }
}
4,061
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/event/EVCacheEvent.java
package com.netflix.evcache.event;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

import com.netflix.evcache.EVCache.Call;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.EVCacheKey;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;

import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedNode;

/**
 * Mutable value object describing a single EVCache call (operation type, app,
 * keys, TTL, payload, timing and arbitrary attributes). An instance is created
 * per operation and handed to {@link EVCacheEventListener}s at start,
 * completion and error time.
 *
 * <p>Not thread-safe: intended to be populated by the calling thread before it
 * is published to listeners.
 */
public class EVCacheEvent {

    /** Well-known attribute key under which the client list may be stashed. */
    public static final String CLIENTS = "clients";

    private final Call call;
    private final String appName;
    private final String cacheName;
    private final EVCacheClientPool pool;
    private final long startTime;

    // 0 means "not yet completed"; see getDurationInMillis().
    private long endTime = 0;
    private String status = EVCacheMetricsFactory.SUCCESS;
    private Collection<EVCacheClient> clients = null;
    private Collection<EVCacheKey> evcKeys = null;
    private int ttl = 0;
    private CachedData cachedData = null;
    // Lazily created bag of arbitrary attributes; null until first setAttribute().
    private Map<Object, Object> data;

    public EVCacheEvent(Call call, String appName, String cacheName, EVCacheClientPool pool) {
        super();
        this.call = call;
        this.appName = appName;
        this.cacheName = cacheName;
        this.pool = pool;
        this.startTime = System.currentTimeMillis();
    }

    public Call getCall() {
        return call;
    }

    public String getAppName() {
        return appName;
    }

    public String getCacheName() {
        return cacheName;
    }

    public EVCacheClientPool getEVCacheClientPool() {
        return pool;
    }

    public Collection<EVCacheKey> getEVCacheKeys() {
        return evcKeys;
    }

    public void setEVCacheKeys(Collection<EVCacheKey> evcacheKeys) {
        this.evcKeys = evcacheKeys;
    }

    public int getTTL() {
        return ttl;
    }

    public void setTTL(int ttl) {
        this.ttl = ttl;
    }

    public CachedData getCachedData() {
        return cachedData;
    }

    public void setCachedData(CachedData cachedData) {
        this.cachedData = cachedData;
    }

    public Collection<EVCacheClient> getClients() {
        return clients;
    }

    public void setClients(Collection<EVCacheClient> clients) {
        this.clients = clients;
    }

    /** Stores an arbitrary attribute; the backing map is created on first use. */
    public void setAttribute(Object key, Object value) {
        if (data == null) data = new HashMap<Object, Object>();
        data.put(key, value);
    }

    /** Returns a previously stored attribute, or null if none was set. */
    public Object getAttribute(Object key) {
        if (data == null) return null;
        return data.get(key);
    }

    public void setEndTime(long endTime) {
        this.endTime = endTime;
    }

    public long getEndTime() {
        return endTime;
    }

    public void setStatus(String status) {
        this.status = status;
    }

    public String getStatus() {
        return status;
    }

    /*
     * Will return the duration of the call if available else -1
     */
    public long getDurationInMillis() {
        if (endTime == 0) return -1;
        return endTime - startTime;
    }

    @Override
    public int hashCode() {
        // FIX: the old implementation dereferenced evcKeys unconditionally and
        // threw NPE before keys were set; equals() already tolerates null keys.
        return evcKeys == null ? 0 : evcKeys.hashCode();
    }

    /**
     * @deprecated replaced by {@link #getEVCacheKeys()}
     */
    @Deprecated
    public Collection<String> getKeys() {
        if (evcKeys == null || evcKeys.size() == 0) return Collections.<String>emptyList();
        final Collection<String> keyList = new ArrayList<String>(evcKeys.size());
        for (EVCacheKey key : evcKeys) {
            keyList.add(key.getKey());
        }
        return keyList;
    }

    /**
     * @deprecated replaced by {@link #setEVCacheKeys(Collection)}
     */
    @Deprecated
    public void setKeys(Collection<String> keys) {
        // Intentionally a no-op; keys are carried as EVCacheKey instances now.
    }

    /**
     * @deprecated replaced by {@link #getEVCacheKeys()}
     */
    @Deprecated
    public Collection<String> getCanonicalKeys() {
        if (evcKeys == null || evcKeys.size() == 0) return Collections.<String>emptyList();
        final Collection<String> keyList = new ArrayList<String>(evcKeys.size());
        for (EVCacheKey key : evcKeys) {
            keyList.add(key.getCanonicalKey());
        }
        return keyList;
    }

    /**
     * Resolves, for every client associated with this event, the primary node
     * that owns the given key. Requires {@link #setClients(Collection)} to have
     * been called first.
     */
    public Collection<MemcachedNode> getMemcachedNode(EVCacheKey evckey) {
        final Collection<MemcachedNode> nodeList = new ArrayList<MemcachedNode>(clients.size());
        for (EVCacheClient client : clients) {
            String key = evckey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(),
                    client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(),
                    client.getBaseEncoder());
            nodeList.add(client.getNodeLocator().getPrimary(key));
        }
        return nodeList;
    }

    /**
     * @deprecated replaced by {@link #setEVCacheKeys(Collection)}
     */
    @Deprecated
    public void setCanonicalKeys(Collection<String> canonicalKeys) {
        // Intentionally a no-op; keys are carried as EVCacheKey instances now.
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null) return false;
        if (getClass() != obj.getClass()) return false;
        EVCacheEvent other = (EVCacheEvent) obj;
        if (appName == null) {
            if (other.appName != null) return false;
        } else if (!appName.equals(other.appName)) return false;
        if (cacheName == null) {
            if (other.cacheName != null) return false;
        } else if (!cacheName.equals(other.cacheName)) return false;
        if (call != other.call) return false;
        if (evcKeys == null) {
            if (other.evcKeys != null) return false;
        } else if (!evcKeys.equals(other.evcKeys)) return false;
        return true;
    }

    public long getStartTime() {
        return this.startTime;
    }

    @Override
    public String toString() {
        // FIX: guard clients — it is null until setClients() is called, and the
        // old code NPE'd on clients.size() when logging an unpopulated event.
        return "EVCacheEvent [call=" + call + ", appName=" + appName + ", cacheName=" + cacheName
                + ", Num of Clients=" + (clients == null ? 0 : clients.size())
                + ", evcKeys=" + evcKeys + ", ttl=" + ttl
                + ", event Time=" + (new Date(startTime)).toString()
                + ", cachedData=" + (cachedData != null
                        ? "[ Flags : " + cachedData.getFlags() + "; Data Array length : "
                                + cachedData.getData().length + "] "
                        : "null")
                + ", Attributes=" + data + "]";
    }
}
4,062
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/event/EVCacheEventListener.java
package com.netflix.evcache.event;

import java.util.EventListener;

import com.netflix.evcache.EVCacheException;

/**
 * Callback interface for observing the lifecycle of an EVCache operation.
 * Implementations are registered with the pool manager and invoked around
 * every call described by an {@link EVCacheEvent}.
 */
public interface EVCacheEventListener extends EventListener {

    /** Invoked just before the operation is dispatched. */
    void onStart(EVCacheEvent e);

    /** Invoked after the operation has finished (successfully or not). */
    void onComplete(EVCacheEvent e);

    /** Invoked when the operation fails with the given cause. */
    void onError(EVCacheEvent e, Throwable t);

    /**
     * Invoked before dispatch; return {@code true} to reject (throttle) the
     * operation.
     *
     * @throws EVCacheException if the throttling decision itself fails
     */
    boolean onThrottle(EVCacheEvent e) throws EVCacheException;
}
4,063
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/event
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/event/throttle/ThrottleListener.java
package com.netflix.evcache.event.throttle;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import javax.inject.Inject;
import javax.inject.Singleton;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCache.Call;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.util.EVCacheConfig;

/**
 * <p>
 * To enable throttling on operations the set the below property
 * <code>EVCacheThrottler.throttle.operations=true</code>
 * </p>
 * <p>
 * To throttle all operations specified in {@link Call} then add the {@link Call} (separated by comma(,)) to the below property.<br>
 * <code>&lt;EVCache appName&gt;.throttle.calls=&lt;comma separated list of calls&gt;</code><br>
 * <br>
 * EX: To throttle {@link Call.GET} and {@link Call.DELETE} operations for EVCACHE_CRS set the below property
 * <code>EVCACHE_CRS.throttle.calls=GET,DELETE</code>
 *
 * @author smadappa
 */
@Singleton
public class ThrottleListener implements EVCacheEventListener {

    private static final Logger log = LoggerFactory.getLogger(ThrottleListener.class);

    // Per-app cache of the "<appName>.throttle.calls" property, populated lazily.
    private final Map<String, Property<Set<String>>> _ignoreOperationsMap;
    private final Property<Boolean> enableThrottleOperations;
    private final EVCacheClientPoolManager poolManager;

    @Inject
    public ThrottleListener(EVCacheClientPoolManager poolManager) {
        this.poolManager = poolManager;
        this._ignoreOperationsMap = new ConcurrentHashMap<String, Property<Set<String>>>();
        enableThrottleOperations = EVCacheConfig.getInstance().getPropertyRepository()
                .get("EVCacheThrottler.throttle.operations", Boolean.class).orElse(false);
        // Re-evaluate registration whenever the global toggle flips at runtime.
        enableThrottleOperations.subscribe(i -> setupListener());
        if (enableThrottleOperations.get()) setupListener();
    }

    /** Registers or de-registers this listener based on the global toggle. */
    private void setupListener() {
        if (enableThrottleOperations.get()) {
            poolManager.addEVCacheEventListener(this);
        } else {
            poolManager.removeEVCacheEventListener(this);
        }
    }

    public void onStart(final EVCacheEvent e) {
    }

    @Override
    public boolean onThrottle(final EVCacheEvent e) {
        if (!enableThrottleOperations.get()) return false;
        final String appName = e.getAppName();
        // FIX: the map was read via get(appName).orElse(...) but never populated,
        // so the first throttled call always threw NullPointerException. Populate
        // lazily from the per-app property instead.
        final Set<String> throttleCalls = getThrottleCallsProperty(appName).get();
        if (throttleCalls.size() > 0 && throttleCalls.contains(e.getCall().name())) {
            if (log.isDebugEnabled()) log.debug("Call : " + e.getCall() + " is throttled");
            return true;
        }
        return false;
    }

    /**
     * Lazily resolves the "&lt;appName&gt;.throttle.calls" property (comma
     * separated list of {@link Call} names) and caches it per app.
     */
    private Property<Set<String>> getThrottleCallsProperty(String appName) {
        return _ignoreOperationsMap.computeIfAbsent(appName, app ->
                EVCacheConfig.getInstance().getPropertyRepository()
                        .get(app + ".throttle.calls", String.class)
                        .orElse("")
                        .map(s -> s.trim().isEmpty()
                                ? Collections.<String>emptySet()
                                : new HashSet<String>(Arrays.asList(s.trim().split("\\s*,\\s*")))));
    }

    public void onComplete(EVCacheEvent e) {
    }

    public void onError(EVCacheEvent e, Throwable t) {
    }
}
4,064
0
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/event
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/event/hotkey/HotKeyListener.java
package com.netflix.evcache.event.hotkey;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

import javax.inject.Inject;
import javax.inject.Singleton;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheKey;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.util.EVCacheConfig;

/**
 * <p>
 * To enable throttling of requests on the client for keys that are sending too many requests in a short duration then set the below property
 * <code>EVCacheThrottler.throttle.hot.keys=true</code>
 * </p>
 * <br>
 * Hot keys can be throttled in 2 ways.
 *
 * <ol>
 * <li>If there are set of keys that are determined by an offline process or enabling debugging then we can set the following property (, separated)
 *
 * ex: <code>&lt;evcache appName&gt;.throttle.keys=key1,key2</code>
 * This will throttle all operations for keys key1 and key2
 *
 * </li><li>Another option is to dynamically figure based on metrics if a key is having a lot of operations.
 * At the start of every operation we add the key to an internal cache for a duration specified by <code>EVCacheThrottler.&lt; evcache appName&gt;.inmemory.expire.after.write.duration.ms</code> (default is 10 seconds).
 * If a key appears again within this duration we increment the value and release the key for <code>EVCacheThrottler.&lt; evcache appName&gt;.inmemory.expire.after.access.duration.ms</code> (default is 10 seconds).
 * Once the key count crosses <code>EVCacheThrottler.&lt; evcache appName&gt;.throttle.value</code> (default is 3) then the key will be throttled. YMMV so tune this based on your evcache app and client requests.
 * </li>
 * </ol>
 *
 * @author smadappa
 */
@Singleton
public class HotKeyListener implements EVCacheEventListener {

    private static final Logger log = LoggerFactory.getLogger(HotKeyListener.class);

    // Per-app "is hot-key throttling enabled" property cache.
    private final Map<String, Property<Boolean>> throttleMap;
    // Per-app sliding count of recently seen keys (Guava cache with expiry).
    private final Map<String, Cache<String, Integer>> cacheMap;
    private final Integer START_VAL = Integer.valueOf(1);
    private final Property<Boolean> enableThrottleHotKeys;
    private final EVCacheClientPoolManager poolManager;
    // Per-app cache of the statically configured "<appName>.throttle.keys" property.
    private final Map<String, Property<Set<String>>> throttleKeysMap;

    @Inject
    public HotKeyListener(EVCacheClientPoolManager poolManager) {
        this.poolManager = poolManager;
        this.throttleKeysMap = new ConcurrentHashMap<String, Property<Set<String>>>();
        this.throttleMap = new ConcurrentHashMap<String, Property<Boolean>>();
        cacheMap = new ConcurrentHashMap<String, Cache<String, Integer>>();
        enableThrottleHotKeys = EVCacheConfig.getInstance().getPropertyRepository()
                .get("EVCacheThrottler.throttle.hot.keys", Boolean.class).orElse(false);
        enableThrottleHotKeys.subscribe((i) -> setupHotKeyListener());
        if (enableThrottleHotKeys.get()) setupHotKeyListener();
    }

    /** Registers/de-registers this listener; clears counters when disabled. */
    private void setupHotKeyListener() {
        if (enableThrottleHotKeys.get()) {
            poolManager.addEVCacheEventListener(this);
        } else {
            poolManager.removeEVCacheEventListener(this);
            for (Cache<String, Integer> cache : cacheMap.values()) {
                cache.invalidateAll();
            }
        }
    }

    /**
     * Returns the per-app key-count cache, building it on first use from the
     * configured sizes/durations, or null when hot-key throttling is disabled
     * for the app.
     */
    private Cache<String, Integer> getCache(String appName) {
        Property<Boolean> throttleFlag = throttleMap.get(appName);
        if (throttleFlag == null) {
            throttleFlag = EVCacheConfig.getInstance().getPropertyRepository()
                    .get("EVCacheThrottler." + appName + ".throttle.hot.keys", Boolean.class).orElse(false);
            throttleMap.put(appName, throttleFlag);
        }
        if (log.isDebugEnabled()) log.debug("Throttle hot keys : " + throttleFlag);
        if (!throttleFlag.get()) {
            return null;
        }

        Cache<String, Integer> cache = cacheMap.get(appName);
        if (cache != null) return cache;

        final Property<Integer> _cacheDuration = EVCacheConfig.getInstance().getPropertyRepository()
                .get("EVCacheThrottler." + appName + ".inmemory.expire.after.write.duration.ms", Integer.class).orElse(10000);
        final Property<Integer> _exireAfterAccessDuration = EVCacheConfig.getInstance().getPropertyRepository()
                .get("EVCacheThrottler." + appName + ".inmemory.expire.after.access.duration.ms", Integer.class).orElse(10000);
        final Property<Integer> _cacheSize = EVCacheConfig.getInstance().getPropertyRepository()
                .get("EVCacheThrottler." + appName + ".inmemory.cache.size", Integer.class).orElse(100);
        CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder().recordStats();
        if (_cacheSize.get() > 0) {
            builder = builder.maximumSize(_cacheSize.get());
        }
        // Access-based expiry wins over write-based expiry when both are set.
        if (_exireAfterAccessDuration.get() > 0) {
            builder = builder.expireAfterAccess(_exireAfterAccessDuration.get(), TimeUnit.MILLISECONDS);
        } else if (_cacheDuration.get() > 0) {
            builder = builder.expireAfterWrite(_cacheDuration.get(), TimeUnit.MILLISECONDS);
        }
        cache = builder.build();
        cacheMap.put(appName, cache);
        return cache;
    }

    /** Counts this event's keys so repeated keys can be detected as "hot". */
    public void onStart(final EVCacheEvent e) {
        if (!enableThrottleHotKeys.get()) return;
        final Cache<String, Integer> cache = getCache(e.getAppName());
        if (cache == null) return;
        for (EVCacheKey evcKey : e.getEVCacheKeys()) {
            final String key = evcKey.getKey();
            Integer val = cache.getIfPresent(key);
            if (val == null) {
                cache.put(key, START_VAL);
            } else {
                cache.put(key, Integer.valueOf(val.intValue() + 1));
            }
        }
    }

    @Override
    public boolean onThrottle(final EVCacheEvent e) {
        if (!enableThrottleHotKeys.get()) return false;
        final String appName = e.getAppName();
        // FIX: throttleKeysMap was read but never populated, so this NPE'd on
        // first use. Resolve the per-app property lazily instead.
        final Set<String> throttleKeys = getThrottleKeysProperty(appName).get();
        if (throttleKeys.size() > 0) {
            if (log.isDebugEnabled()) log.debug("Throttle : " + throttleKeys);
            for (EVCacheKey evcKey : e.getEVCacheKeys()) {
                final String key = evcKey.getKey();
                if (throttleKeys.contains(key)) {
                    if (log.isDebugEnabled()) log.debug("Key : " + key + " is throttled");
                    return true;
                }
            }
        }

        final Cache<String, Integer> cache = getCache(appName);
        if (cache == null) return false;

        final Property<Integer> _throttleVal = EVCacheConfig.getInstance().getPropertyRepository()
                .get("EVCacheThrottler." + appName + ".throttle.value", Integer.class).orElse(3);
        for (EVCacheKey evcKey : e.getEVCacheKeys()) {
            final String key = evcKey.getKey();
            final Integer val = cache.getIfPresent(key);
            // FIX: getIfPresent returns null for unseen/expired keys; the old
            // code called val.intValue() unconditionally and threw NPE.
            if (val != null && val.intValue() > _throttleVal.get()) {
                if (log.isDebugEnabled()) log.debug("Key : " + key + " has exceeded " + _throttleVal.get()
                        + ". Will throttle this request");
                return true;
            }
        }
        return false;
    }

    /**
     * Lazily resolves the statically configured throttled-key list. Property
     * name per the class javadoc: "&lt;appName&gt;.throttle.keys" (comma
     * separated) — TODO confirm against production config.
     */
    private Property<Set<String>> getThrottleKeysProperty(String appName) {
        return throttleKeysMap.computeIfAbsent(appName, app ->
                EVCacheConfig.getInstance().getPropertyRepository()
                        .get(app + ".throttle.keys", String.class)
                        .orElse("")
                        .map(s -> s.trim().isEmpty()
                                ? Collections.<String>emptySet()
                                : new HashSet<String>(Arrays.asList(s.trim().split("\\s*,\\s*")))));
    }

    /** Decrements the key counters for a completed event. */
    public void onComplete(EVCacheEvent e) {
        if (!enableThrottleHotKeys.get()) return;
        final String appName = e.getAppName();
        final Cache<String, Integer> cache = getCache(appName);
        if (cache == null) return;
        for (EVCacheKey evcKey : e.getEVCacheKeys()) {
            final String key = evcKey.getKey();
            Integer val = cache.getIfPresent(key);
            if (val != null) {
                cache.put(key, Integer.valueOf(val.intValue() - 1));
            }
        }
    }

    /** Decrements the key counters for a failed event. */
    public void onError(EVCacheEvent e, Throwable t) {
        if (!enableThrottleHotKeys.get()) return;
        final String appName = e.getAppName();
        final Cache<String, Integer> cache = getCache(appName);
        if (cache == null) return;
        for (EVCacheKey evcKey : e.getEVCacheKeys()) {
            final String key = evcKey.getKey();
            Integer val = cache.getIfPresent(key);
            if (val != null) {
                cache.put(key, Integer.valueOf(val.intValue() - 1));
            }
        }
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((cacheMap == null) ? 0 : cacheMap.hashCode());
        result = prime * result + ((throttleMap == null) ? 0 : throttleMap.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null) return false;
        if (getClass() != obj.getClass()) return false;
        HotKeyListener other = (HotKeyListener) obj;
        if (cacheMap == null) {
            if (other.cacheMap != null) return false;
        } else if (!cacheMap.equals(other.cacheMap)) return false;
        if (throttleMap == null) {
            if (other.throttleMap != null) return false;
        } else if (!throttleMap.equals(other.throttleMap)) return false;
        return true;
    }
}
4,065
0
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache/service/StartServer.java
package com.netflix.evcache.service;

import java.util.HashMap;
import java.util.Map;

import javax.servlet.ServletContextEvent;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.inject.Injector;
import com.google.inject.servlet.ServletModule;
import com.netflix.evcache.EVCacheClientLibrary;
import com.netflix.evcache.service.resources.EVCacheRESTService;
import com.netflix.evcservice.service.StatusPage;
import com.netflix.server.base.BaseHealthCheckServlet;
import com.netflix.server.base.BaseStatusPage;
import com.netflix.server.base.NFFilter;
import com.netflix.server.base.lifecycle.BaseServerLifecycleListener;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.guice.JerseyServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;

/**
 * Servlet lifecycle listener that bootstraps the evcacheproxy server: loads
 * the EVCache client library at startup and wires the Jersey/Guice servlet
 * stack (filters, status/health pages and the REST resource).
 */
public class StartServer extends BaseServerLifecycleListener {

    private static final Logger logger = LoggerFactory.getLogger(StartServer.class);

    private static final String APP_NAME = "evcacheproxy";
    private static final String CONFIG_NAME = "evcacheproxy";

    /**
     * Creates a new StartServer object.
     */
    public StartServer() {
        super(CONFIG_NAME, APP_NAME, null);
    }

    @Override
    protected void initialize(ServletContextEvent sce) throws Exception {
        // Eagerly resolve the client library so the cache pools are ready
        // before the first request arrives.
        final Injector injector = getInjector();
        injector.getInstance(EVCacheClientLibrary.class);
    }

    @Override
    protected ServletModule getServletModule() {
        return new JerseyServletModule() {
            @Override
            protected void configureServlets() {
                logger.info("########## CONFIGURING SERVLETS ##########");

                // Init params shared by NFFilter and the Guice/Jersey container.
                final Map<String, String> params = new HashMap<String, String>();
                // initParams.put(ServletContainer.JSP_TEMPLATES_BASE_PATH, "/WEB-INF/jsp");
                // initParams.put(ServletContainer.FEATURE_FILTER_FORWARD_ON_404, "true");
                // initParams.put("requestId.accept", "true");
                // initParams.put("requestId.require", "true");
                params.put(ResourceConfig.FEATURE_DISABLE_WADL, "true");
                params.put(PackagesResourceConfig.PROPERTY_PACKAGES, "com.netflix.evcache.service.resources");

                // Route everything (including health/status) through NFFilter.
                filter("/*").through(NFFilter.class, params);
                filter("/healthcheck", "/status").through(NFFilter.class, params);

                // Built-in status and health endpoints, then Jersey for the rest.
                serve("/Status", "/status").with(BaseStatusPage.class);
                serve("/healthcheck", "/Healthcheck").with(BaseHealthCheckServlet.class);
                serve("/*").with(GuiceContainer.class, params);

                bind(EVCacheRESTService.class).asEagerSingleton();
                binder().bind(GuiceContainer.class).asEagerSingleton();
                install(new EVCacheServiceModule());
            }
        };
    }
}
4,066
0
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache/service/HealthCheckHandlerImpl.java
package com.netflix.evcache.service; import com.google.inject.Singleton; import com.netflix.server.base.BaseHealthCheckServlet; /** * Created by senugula on 03/22/15. */ @Singleton public class HealthCheckHandlerImpl extends BaseHealthCheckServlet { public int getStatus() { return 200; // TODO } }
4,067
0
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache/service/EVCacheServiceModule.java
package com.netflix.evcache.service; import com.google.inject.AbstractModule; import com.netflix.appinfo.ApplicationInfoManager; import com.netflix.config.ConfigurationManager; import com.netflix.discovery.guice.EurekaModule; import com.netflix.evcache.EVCacheModule; import com.netflix.evcache.connection.ConnectionModule; import com.netflix.evcache.service.resources.EVCacheRESTService; import com.netflix.governator.ShutdownHookModule; import com.netflix.spectator.nflx.SpectatorModule; import com.sun.jersey.guice.JerseyServletModule; import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; import netflix.adminresources.resources.KaryonWebAdminModule; public class EVCacheServiceModule extends AbstractModule { @Override protected void configure() { // try { // ConfigurationManager.loadAppOverrideProperties("evcacheproxy"); // final String env = ConfigurationManager.getConfigInstance().getString("eureka.environment", "test"); // if(env != null && env.length() > 0) { // ConfigurationManager.loadAppOverrideProperties("evcacheproxy-"+env); // } // } catch (Exception e) { // e.printStackTrace(); // } // // // install(new ShutdownHookModule()); // install(new EurekaModule()); // install(new SpectatorModule()); // install(new ConnectionModule()); // install(new EVCacheModule()); // install(new KaryonWebAdminModule()); // install(new JerseyServletModule() { // protected void configureServlets() { // serve("/*").with(GuiceContainer.class); // binder().bind(GuiceContainer.class).asEagerSingleton(); // bind(EVCacheRESTService.class).asEagerSingleton(); // bind(HealthCheckHandlerImpl.class).asEagerSingleton(); // } // }); } }
4,068
0
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache/service
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache/service/resources/EVCacheRESTService.java
package com.netflix.evcache.service.resources;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.service.transcoder.RESTServiceTranscoder;
import net.spy.memcached.CachedData;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.InputStream;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

/**
 * REST facade over EVCache: set/put/get/delete of raw byte payloads per
 * (appId, key). The app id is upper-cased and an {@link EVCache} instance is
 * built and cached per app on first use.
 *
 * Created by senugula on 3/22/16.
 */
@Singleton
@Path("/evcrest/v1.0")
public class EVCacheRESTService {

    private static final Logger logger = LoggerFactory.getLogger(EVCacheRESTService.class);

    private final EVCache.Builder builder;
    // FIX: this singleton resource is hit concurrently; the previous plain
    // HashMap with check-then-put was not thread-safe.
    private final Map<String, EVCache> evCacheMap;
    private final RESTServiceTranscoder evcacheTranscoder = new RESTServiceTranscoder();

    @Inject
    public EVCacheRESTService(EVCache.Builder builder) {
        this.builder = builder;
        this.evCacheMap = new ConcurrentHashMap<>();
    }

    /** Stores the request body under {appId}/{key} with the given ttl. */
    @POST
    @Path("{appId}/{key}")
    @Consumes({MediaType.APPLICATION_OCTET_STREAM})
    @Produces(MediaType.TEXT_PLAIN)
    public Response setOperation(final InputStream in, @PathParam("appId") String pAppId,
                                 @PathParam("key") String key, @QueryParam("ttl") String ttl,
                                 @DefaultValue("") @QueryParam("flag") String flag) {
        try {
            final String appId = pAppId.toUpperCase();
            final byte[] bytes = IOUtils.toByteArray(in);
            return setData(appId, ttl, flag, key, bytes);
        } catch (EVCacheException e) {
            logger.error("Error setting key " + key, e);
            return Response.serverError().build();
        } catch (Throwable t) {
            logger.error("Error setting key " + key, t);
            return Response.serverError().build();
        }
    }

    /** Same semantics as {@link #setOperation}; provided for PUT clients. */
    @PUT
    @Path("{appId}/{key}")
    @Consumes({MediaType.APPLICATION_OCTET_STREAM})
    @Produces(MediaType.TEXT_PLAIN)
    public Response putOperation(final InputStream in, @PathParam("appId") String pAppId,
                                 @PathParam("key") String key, @QueryParam("ttl") String ttl,
                                 @DefaultValue("") @QueryParam("flag") String flag) {
        try {
            final String appId = pAppId.toUpperCase();
            final byte[] bytes = IOUtils.toByteArray(in);
            return setData(appId, ttl, flag, key, bytes);
        } catch (EVCacheException e) {
            logger.error("Error putting key " + key, e);
            return Response.serverError().build();
        } catch (Throwable t) {
            logger.error("Error putting key " + key, t);
            return Response.serverError().build();
        }
    }

    /**
     * Writes the payload and waits up to 2.5s on the latch; 200 on full or
     * partial success, 500 when no replica acknowledged, 400 on a bad ttl.
     */
    private Response setData(String appId, String ttl, String flag, String key, byte[] bytes)
            throws EVCacheException, InterruptedException {
        final EVCache evcache = getEVCache(appId);
        if (ttl == null) {
            return Response.status(400).type("text/plain")
                    .entity("Please specify ttl for the key " + key + " as query parameter \n").build();
        }
        final int timeToLive;
        try {
            timeToLive = Integer.parseInt(ttl);
        } catch (NumberFormatException nfe) {
            // FIX: a malformed ttl used to propagate and turn into a 500;
            // it is a client error, so report 400 instead.
            return Response.status(400).type("text/plain")
                    .entity("Please specify a numeric ttl for the key " + key + " as query parameter \n").build();
        }
        EVCacheLatch latch = null;
        if (flag != null && flag.length() > 0) {
            final CachedData cd = new CachedData(Integer.parseInt(flag), bytes, Integer.MAX_VALUE);
            latch = evcache.set(key, cd, timeToLive, Policy.ALL_MINUS_1);
        } else {
            latch = evcache.set(key, bytes, timeToLive, Policy.ALL_MINUS_1);
        }
        if (latch != null) {
            final boolean status = latch.await(2500, TimeUnit.MILLISECONDS);
            if (status) {
                return Response.ok("Set Operation for Key - " + key + " was successful. \n").build();
            } else {
                if (latch.getCompletedCount() > 0) {
                    if (latch.getSuccessCount() == 0) {
                        return Response.serverError().build();
                    } else if (latch.getSuccessCount() > 0) {
                        return Response.ok("Set Operation for Key - " + key + " was successful in "
                                + latch.getSuccessCount() + " Server Groups. \n").build();
                    }
                } else {
                    return Response.serverError().build();
                }
            }
        }
        return Response.serverError().build();
    }

    /** Returns the raw bytes stored under {appId}/{key}, or 404. */
    @GET
    @Path("{appId}/{key}")
    @Produces({MediaType.APPLICATION_OCTET_STREAM})
    public Response getOperation(@PathParam("appId") String appId, @PathParam("key") String key) {
        appId = appId.toUpperCase();
        if (logger.isDebugEnabled()) logger.debug("Get for application " + appId + " for Key " + key);
        try {
            final EVCache evCache = getEVCache(appId);
            CachedData cachedData = (CachedData) evCache.get(key, evcacheTranscoder);
            if (cachedData == null) {
                return Response.status(404).type("text/plain")
                        .entity("Key " + key + " Not Found in cache " + appId + "\n").build();
            }
            byte[] bytes = cachedData.getData();
            if (bytes == null) {
                return Response.status(404).type("text/plain")
                        .entity("Key " + key + " Not Found in cache " + appId + "\n").build();
            } else {
                return Response.status(200).type("application/octet-stream").entity(bytes).build();
            }
        } catch (EVCacheException e) {
            logger.error("Error getting key " + key, e);
            return Response.serverError().build();
        }
    }

    /** Deletes {appId}/{key} from all replicas. */
    @DELETE
    @Path("{appId}/{key}")
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces("text/plain")
    public Response deleteOperation(@PathParam("appId") String appId, @PathParam("key") String key) {
        if (logger.isDebugEnabled()) logger.debug("Get for application " + appId + " for Key " + key);
        appId = appId.toUpperCase();
        final EVCache evCache = getEVCache(appId);
        try {
            final Future<Boolean>[] _future = evCache.delete(key);
            // FIX: the old code compared the Future[] itself to Boolean.TRUE,
            // which is always false, so the debug log never fired.
            if (logger.isDebugEnabled() && _future != null) {
                logger.debug("delete for key " + key + " submitted to " + _future.length + " server groups");
            }
            return Response.ok("Deleted Operation for Key - " + key + " was successful. \n").build();
        } catch (EVCacheException e) {
            logger.error("Error deleting key " + key, e);
            return Response.serverError().build();
        }
    }

    /** Returns the cached EVCache for the app, building it atomically on first use. */
    private EVCache getEVCache(String appId) {
        // NOTE(review): the shared builder is mutated inside the lambda; the
        // ConcurrentHashMap serializes creation per key — confirm the builder
        // itself is not used concurrently elsewhere.
        return evCacheMap.computeIfAbsent(appId, id -> builder.setAppName(id).build());
    }
}
4,069
0
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache/service
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcache/service/transcoder/RESTServiceTranscoder.java
package com.netflix.evcache.service.transcoder;

import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.SerializingTranscoder;

/**
 * Pass-through transcoder for the REST proxy: values are served/stored as raw
 * bytes, except that gzip-compressed payloads are transparently decompressed
 * on decode.
 *
 * Created by senugula on 6/23/16.
 */
public class RESTServiceTranscoder extends SerializingTranscoder {

    /** Flag bit used by SerializingTranscoder to mark compressed payloads. */
    static final int COMPRESSED = 2;

    public RESTServiceTranscoder() {
    }

    /** Decoding is cheap (at most one decompress), so never defer it. */
    public boolean asyncDecode(CachedData d) {
        return false;
    }

    public CachedData decode(CachedData d) {
        if ((d.getFlags() & COMPRESSED) != 0) {
            // NOTE(review): the COMPRESSED bit is left set on the returned flags
            // even though the payload is now decompressed — confirm downstream
            // consumers ignore it.
            // FIX(idiom): MAX_SIZE is a static constant; access it via the class
            // rather than the instance.
            d = new CachedData(d.getFlags(), super.decompress(d.getData()), CachedData.MAX_SIZE);
        }
        return d;
    }

    /** Values arrive pre-encoded from the REST layer; store them untouched. */
    public CachedData encode(CachedData o) {
        return o;
    }

    public int getMaxSize() {
        return CachedData.MAX_SIZE;
    }
}
4,070
0
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcservice
Create_ds/EVCache/evcacheproxy/src/main/java/com/netflix/evcservice/service/StatusPage.java
package com.netflix.evcservice.service;

import java.io.PrintWriter;

import com.google.inject.Singleton;

import com.netflix.server.base.BaseStatusPage;

/**
 * Status page for the evcacheproxy service; currently delegates entirely to
 * the base implementation.
 *
 * Created by senugula on 03/22/15.
 */
@Singleton
public class StatusPage extends BaseStatusPage {

    private static final long serialVersionUID = 1L;

    @Override
    protected void getDetails(PrintWriter out, boolean htmlize) {
        super.getDetails(out, htmlize);
        // Add any extra status info here
    }
}
4,071
0
Create_ds/EVCache/evcache-client/test/com/netflix/evcache
Create_ds/EVCache/evcache-client/test/com/netflix/evcache/test/DIBase.java
package com.netflix.evcache.test;

import com.google.inject.Injector;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.archaius.config.MapConfig;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.discovery.guice.EurekaClientModule;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheModule;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.connection.DIConnectionModule;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.guice.LifecycleInjectorBuilder;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.spectator.nflx.SpectatorModule;

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;

import rx.Scheduler;

/**
 * TestNG base class for dependency-injected EVCache integration tests.
 *
 * Boots a Governator/Guice injector (Eureka + EVCache + Archaius + Spectator) before
 * the suite, tears it down after, and exposes thin helpers (insert/get/delete/touch/
 * append/replace, latch- and Observable-based variants) used by concrete test classes.
 * Keys used by all helpers follow the "key_&lt;i&gt;" convention so the helpers compose.
 */
@SuppressWarnings("unused")
public abstract class DIBase {

    private static final Logger log = LoggerFactory.getLogger(DIBase.class);

    // Shared test fixtures, populated by setupEnv()/getNewBuilder().
    protected EVCache evCache = null;
    protected Injector injector = null;
    protected LifecycleManager lifecycleManager = null;
    protected EVCacheClientPoolManager manager = null;

    /**
     * Builds the Eureka/Archaius property set for the suite. Chooses desktop vs cloud
     * defaults based on the EC2_HOSTNAME env var, and also mutates System properties
     * (region/environment/log levels) as a side effect.
     */
    protected Properties getProps() {
        String hostname = System.getenv("EC2_HOSTNAME");
        Properties props = new Properties();
        if(hostname == null) {
            // Running outside EC2 (desktop): relax Eureka validation and timeouts.
            props.setProperty("eureka.datacenter", "datacenter");//change to ndc while running on desktop
            props.setProperty("eureka.validateInstanceId","false");
            props.setProperty("eureka.mt.connect_timeout","1");
            props.setProperty("eureka.mt.read_timeout","1");
        } else {
            props.setProperty("eureka.datacenter", "cloud");
            props.setProperty("eureka.validateInstanceId","true");
        }
        System.setProperty("@region", "us-east-1");
        System.setProperty("@environment", "test");
        System.setProperty("eureka.region", "us-east-1");
        System.setProperty("eureka.environment", "test");
        props.setProperty("eureka.environment", "test");
        props.setProperty("eureka.region", "us-east-1");
        props.setProperty("eureka.appid", "clatency");
        props.setProperty("eureka.serviceUrl.default","http://${eureka.region}.discovery${eureka.environment}.netflix.net:7001/discovery/v2/");
        props.setProperty("log4j.rootLogger", "DEBUG");
        System.setProperty("log4j.rootLogger", "DEBUG");
        props.setProperty("log4j.logger.com.netflix.evcache.test.DIBase", "DEBUG");
        props.setProperty("log4j.logger.com.netflix.evcache.test.EVCacheTestDI", "DEBUG");
        props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheNodeLocator", "ERROR");
        props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheClientUtil", "DEBUG");
        return props;
    }

    /** Hook for subclasses; intentionally a no-op here. */
    public void setupTest(Properties props) {
    }

    /**
     * Builds and starts the Governator injector with the EVCache module stack, then
     * resolves the shared fixtures. Exceptions are logged rather than propagated so a
     * mis-configured environment surfaces as test failures, not suite-setup errors.
     */
    @BeforeSuite
    public void setupEnv() {
        Properties props = getProps();
        try {
            LifecycleInjectorBuilder builder = LifecycleInjector.builder();
            builder.withModules(
                new EurekaClientModule(),
                new EVCacheModule(),
                new DIConnectionModule(),
                new SpectatorModule(),
                // Override application config with this suite's properties.
                new ArchaiusModule() {
                    protected void configureArchaius() {
                        bindApplicationConfigurationOverride().toInstance(MapConfig.from(props));
                    };
                }
            );
            injector = builder.build().createInjector();
            lifecycleManager = injector.getInstance(LifecycleManager.class);
            lifecycleManager.start();
            injector.getInstance(ApplicationInfoManager.class);
            // NOTE(review): 'lib' is unused; the getInstance call presumably forces eager
            // initialization of the EVCache module — confirm before removing.
            final EVCacheModule lib = injector.getInstance(EVCacheModule.class);
            manager = injector.getInstance(EVCacheClientPoolManager.class);
        } catch (Throwable e) {
            e.printStackTrace();
            log.error(e.getMessage(), e);
        }
    }

    /** Stops the Governator lifecycle after the suite completes. */
    @AfterSuite
    public void shutdownEnv() {
        lifecycleManager.close();
    }

    /** Returns a fresh injector-provided {@link EVCache.Builder}. */
    protected EVCache.Builder getNewBuilder() {
        final EVCache.Builder evCacheBuilder = injector.getInstance(EVCache.Builder.class);
        if(log.isDebugEnabled()) log.debug("evCacheBuilder : " + evCacheBuilder);
        return evCacheBuilder;
    }

    /** Appends ";APP_&lt;i&gt;" to key_&lt;i&gt; with a 1h TTL; false if any replica reports failure. */
    protected boolean append(int i, EVCache gCache) throws Exception {
        String val = ";APP_" + i;
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.append(key, val, 60 * 60);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("APPEND : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }

    /** appendOrAdd with the default 1h TTL. */
    protected boolean appendOrAdd(int i, EVCache gCache) throws Exception {
        return appendOrAdd(i, gCache, 60 * 60);
    }

    /**
     * appendOrAdd of "val_aa_&lt;i&gt;" on key_&lt;i&gt; using Policy.ALL_MINUS_1.
     * NOTE(review): always returns true regardless of the latch outcome — the awaited
     * 'status' is only logged; confirm whether that is intentional.
     */
    protected boolean appendOrAdd(int i, EVCache gCache, int ttl) throws Exception {
        String val = "val_aa_" + i;
        String key = "key_" + i;
        EVCacheLatch latch = gCache.appendOrAdd(key, val, null, ttl, Policy.ALL_MINUS_1);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; Latch = " + latch);
        boolean status = latch.await(2000, TimeUnit.MILLISECONDS);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; success = " + status);
        return true;
    }

    /** add of "val_add_&lt;i&gt;" on key_&lt;i&gt; with a 1h TTL. */
    public boolean add(int i, EVCache gCache) throws Exception {
        String val = "val_add_"+i;
        String key = "key_" + i;
        boolean status = gCache.add(key, val, null, 60 * 60);
        if(log.isDebugEnabled()) log.debug("ADD : key : " + key + "; success = " + status);
        return status;
    }

    /** set of "val_&lt;i&gt;" on key_&lt;i&gt; with a 1h TTL; false if any replica reports failure. */
    public boolean insert(int i, EVCache gCache) throws Exception {
        String val = "val_"+i;
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.set(key, val, 60 * 60);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }

    /** replace with the default 1h TTL. */
    protected boolean replace(int i, EVCache gCache) throws Exception {
        return replace(i, gCache, 60 * 60);
    }

    /** replace of "val_replaced_&lt;i&gt;"; true when at least one replica succeeded. */
    protected boolean replace(int i, EVCache gCache, int ttl) throws Exception {
        String val = "val_replaced_" + i;
        String key = "key_" + i;
        EVCacheLatch status = gCache.replace(key, val, null, ttl, Policy.ALL);
        boolean opStatus = status.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("REPLACE : key : " + key + "; success = " + opStatus + "; EVCacheLatch = " + status);
        return status.getSuccessCount() > 0;
    }

    /** delete of key_&lt;i&gt;; false if any replica reports failure. */
    public boolean delete(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.delete(key);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }

    /** touch with the default 1h TTL. */
    protected boolean touch(int i, EVCache gCache) throws Exception {
        return touch(i, gCache, 60 * 60);
    }

    /** touch of key_&lt;i&gt; to the given TTL; false if any replica reports failure. */
    protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.touch(key, ttl);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }

    /** Low-level set via per-client writes sharing one EVCacheLatch (Policy.ALL). */
    @SuppressWarnings("deprecation")
    protected boolean insertUsingLatch(int i, String app) throws Exception {
        String val = "val_" + i;
        String key = "key_" + i;
        long start = System.currentTimeMillis();
        final EVCacheClient[] clients = manager.getEVCacheClientPool(app).getEVCacheClientForWrite();
        final EVCacheLatch latch = new EVCacheLatchImpl(EVCacheLatch.Policy.ALL, clients.length, app);
        for (EVCacheClient client : clients) {
            client.set(key, val, 60 * 60, latch);
        }
        boolean success = latch.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("SET LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec");
        return success;
    }

    /** Low-level delete via per-client deletes sharing one EVCacheLatch; always returns true. */
    protected boolean deleteLatch(int i, String appName) throws Exception {
        long start = System.currentTimeMillis();
        String key = "key_" + i;
        final EVCacheClient[] clients = manager.getEVCacheClientPool(appName).getEVCacheClientForWrite();
        final EVCacheLatch latch = new EVCacheLatchImpl(Policy.ALL, clients.length, appName);
        for (EVCacheClient client : clients) {
            client.delete(key, latch);
        }
        latch.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("DELETE LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec" + "; Latch : " + latch);
        return true;
    }

    /** get of key_&lt;i&gt; as a String (null when absent). */
    public String get(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key);
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
        return value;
    }

    /** getAndTouch of key_&lt;i&gt;, refreshing the TTL to 1h. */
    public String getAndTouch(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>getAndTouch(key, 60 * 60);
        if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
        return value;
    }

    /** Bulk get for the given keys. */
    public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
        final Map<String, String> value = gCache.<String>getBulk(keys);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }

    /** Bulk get that also refreshes each key's TTL. */
    public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
        final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }

    /** Rx-based get, blocking on the Single for the value. */
    public String getObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key, scheduler).toBlocking().value();
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
        return value;
    }

    /** Rx-based getAndTouch (1h TTL), blocking on the Single for the value. */
    public String getAndTouchObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>getAndTouch(key, 60 * 60, scheduler).toBlocking().value();
        if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
        return value;
    }

    /**
     * Runnable that exercises insert/get/delete 100 times against a given cache;
     * usable as a load driver from multiple threads.
     */
    class RemoteCaller implements Runnable {
        EVCache gCache;

        public RemoteCaller(EVCache c) {
            this.gCache = c;
        }

        public void run() {
            try {
                // NOTE(review): 'count' is unused.
                int count = 1;
                for(int i = 0; i < 100; i++) {
                    insert(i, gCache);
                    get(i, gCache);
                    delete(i, gCache);
                }
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
    }
}
4,072
0
Create_ds/EVCache/evcache-client/test/com/netflix/evcache
Create_ds/EVCache/evcache-client/test/com/netflix/evcache/test/EVCacheTestDI.java
package com.netflix.evcache.test;

import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;

import java.util.Map;
import java.util.Properties;
import java.util.*;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import com.netflix.evcache.*;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.KeyHasher;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;

import com.netflix.evcache.operation.EVCacheOperationFuture;

import rx.schedulers.Schedulers;

import static org.testng.Assert.*;

/**
 * End-to-end TestNG suite for the dependency-injected EVCache client against the
 * EVCACHE_TEST app. Tests are chained with dependsOnMethods so they execute in a
 * fixed order: client creation, key validation, then the full CRUD/bulk/Observable
 * cycle, and finally key-hashing scenarios at app and server-group level.
 */
public class EVCacheTestDI extends DIBase implements EVCacheGetOperationListener<String> {

    private static final Logger log = LoggerFactory.getLogger(EVCacheTestDI.class);

    // Number of keys exercised per test; testAll() also multiplies this for its loop count.
    private int loops = 1;
    // Properties merged into the suite config by getProps(); mutated between refreshes
    // to flip hashing-related flags.
    private Map<String, String> propertiesToSet;
    private String appName = "EVCACHE_TEST";

    /** Standalone entry point: runs testAll() outside of TestNG. */
    public static void main(String args[]) {
        try {
            EVCacheTestDI test = new EVCacheTestDI();
            test.testAll();
        } catch(Throwable t) {
            log.error(t.getMessage(), t);
        }
    }

    /** Seeds the default client-pool / throttling / protocol properties for the test app. */
    public EVCacheTestDI() {
        propertiesToSet = new HashMap<>();
        propertiesToSet.putIfAbsent(appName + ".us-east-1d.EVCacheClientPool.writeOnly", "false");
        propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.poolSize", "1");
        propertiesToSet.putIfAbsent(appName + ".ping.servers", "false");
        propertiesToSet.putIfAbsent(appName + ".cid.throw.exception", "true");
        propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.readTimeout", "500");
        propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.bulkReadTimeout", "500");
        propertiesToSet.putIfAbsent(appName + ".max.read.queue.length", "20");
        propertiesToSet.putIfAbsent("EVCacheClientPoolManager.log.apps", appName);
        propertiesToSet.putIfAbsent(appName + ".fallback.zone", "true");
        propertiesToSet.putIfAbsent(appName + ".enable.throttling", "false");
        propertiesToSet.putIfAbsent(appName + ".throttle.time", "0");
        propertiesToSet.putIfAbsent(appName + ".throttle.percent", "0");
        propertiesToSet.putIfAbsent(appName + ".log.operation", "1000");
        propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.validate.input.queue", "true");
        propertiesToSet.putIfAbsent("evcache.use.binary.protocol", "false");
    }

    /** Overlays this suite's property map on top of the base suite properties. */
    protected Properties getProps() {
        Properties props = super.getProps();
        propertiesToSet.entrySet().forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
        return props;
    }

    /** Builds the shared EVCache client (prefix "cid", retries enabled). */
    @Test
    public void testEVCache() {
        this.evCache = getNewBuilder().setAppName(appName).setCachePrefix("cid").enableRetry().build();
        assertNotNull(evCache);
    }

    /**
     * Verifies invalid keys are rejected: a key containing spaces and a key exceeding
     * the memcached key-length limit must both throw on get().
     */
    @Test(dependsOnMethods = { "testEVCache" })
    public void testKeySizeCheck() throws Exception {
        final String key = "This is an invalid key";
        boolean exceptionThrown = false;
        for (int i = 0; i < loops; i++) {
            try {
                if (log.isDebugEnabled()) log.debug("Check key : " + key );
                evCache.<String>get(key);
            } catch(Exception e) {
                exceptionThrown = true;
                if (log.isDebugEnabled()) log.debug("Check key : " + key + ": INVALID");
            }
            assertTrue(exceptionThrown);
        }
        final String longKey = "This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.";
        exceptionThrown = false;
        for (int i = 0; i < loops; i++) {
            try {
                if (log.isDebugEnabled()) log.debug("Check key length : " + longKey );
                evCache.<String>get(longKey);
            } catch(Exception e) {
                exceptionThrown = true;
                if (log.isDebugEnabled()) log.debug("Check key length: " + longKey + ": INVALID");
            }
            assertTrue(exceptionThrown);
        }
    }

    @Test(dependsOnMethods = { "testKeySizeCheck" })
    public void testTouch() throws Exception {
        for (int i = 0; i < loops; i++) {
            touch(i, evCache);
        }
    }

    @Test(dependsOnMethods = { "testTouch" })
    public void testDelete() throws Exception {
        for (int i = 0; i < loops; i++) {
            delete(i, evCache);
        }
    }

    @Test(dependsOnMethods = { "testDelete" })
    public void testAdd() throws Exception {
        for (int i = 0; i < loops; i++) {
            add(i, evCache);
        }
    }

    @Test(dependsOnMethods = { "testAdd" })
    public void testInsertBinary() throws Exception {
        for (int i = 0; i < loops; i++) {
            assertTrue(insertBytes(i, evCache));
        }
    }

    /** Stores a raw byte[] value under key_b_&lt;i&gt; with a 24h TTL. */
    private boolean insertBytes(int i, EVCache gCache) throws Exception {
        byte[] val = ("val_" + i).getBytes();
        String key = "key_b_" + i;
        Future<Boolean>[] status = gCache.set(key, val, 24 * 60 * 60);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("SET BYTES : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }

    @Test(dependsOnMethods = { "testInsertBinary" })
    public void testGetBytes() throws Exception {
        for (int i = 0; i < loops; i++) {
            String key = "key_b_" + i;
            byte[] value = evCache.<byte[]> get(key);
            if(value != null) {
                if (log.isDebugEnabled()) log.debug("get : key : " + key + " val length = " + value.length);
            }
            assertNotNull(value);
        }
    }

    @Test(dependsOnMethods = { "testGetBytes" })
    public void testInsert() throws Exception {
        for (int i = 0; i < loops; i++) {
            assertTrue(insert(i, evCache));
        }
    }

    @Test(dependsOnMethods = { "testInsert" })
    public void testGet() throws Exception {
        for (int i = 0; i < loops; i++) {
            final String val = get(i, evCache);
            assertNotNull(val);
            assertTrue(val.equals("val_" + i));
        }
    }

    @Test(dependsOnMethods = { "testGet" })
    public void testGetAndTouch() throws Exception {
        for (int i = 0; i < loops; i++) {
            final String val = getAndTouch(i, evCache);
            assertNotNull(val);
            assertTrue(val.equals("val_" + i));
        }
    }

    /** Bulk-get of all test keys; a missing key is logged but not a failure. */
    @Test(dependsOnMethods = { "testGetAndTouch" })
    public void testBulk() throws Exception {
        final String[] keys = new String[loops];
        for (int i = 0; i < loops; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulk(keys, evCache);
        assertNotNull(vals);
        for (int i = 0; i < keys.length; i++) {
            String key = keys[i];
            String val = vals.get(key);
            if (val == null) {
                if (log.isDebugEnabled()) log.debug("key " + key + " returned null");
            } else {
                assertTrue(val.equals("val_" + i));
            }
        }
    }

    @Test(dependsOnMethods = { "testBulk" })
    public void testBulkAndTouch() throws Exception {
        final String[] keys = new String[loops];
        for (int i = 0; i < loops; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulkAndTouch(keys, evCache, 24 * 60 * 60);
        assertNotNull(vals);
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
            if (val == null) {
                if (log.isDebugEnabled()) log.debug("key " + key + " returned null");
            } else {
                assertTrue(val.equals("val_" + i));
            }
        }
    }

    @Test(dependsOnMethods = { "testInsert" })
    public void testGetObservable() throws Exception {
        for (int i = 0; i < loops; i++) {
            final String val = getObservable(i, evCache, Schedulers.computation());
            assertNotNull(val);
            assertTrue(val.equals("val_" + i));
        }
    }

    @Test(dependsOnMethods = { "testGetObservable" })
    public void testGetAndTouchObservable() throws Exception {
        for (int i = 0; i < loops; i++) {
            final String val = getAndTouchObservable(i, evCache, Schedulers.computation());
            assertNotNull(val);
            assertTrue(val.equals("val_" + i));
        }
    }

    /** Grace period for async operation callbacks to drain before the next phase. */
    @Test(dependsOnMethods = { "testGetAndTouchObservable" })
    public void waitForCallbacks() throws Exception {
        Thread.sleep(1000);
    }

    @Test(dependsOnMethods = { "waitForCallbacks" })
    public void testReplace() throws Exception {
        for (int i = 0; i < 10; i++) {
            replace(i, evCache);
        }
    }

    @Test(dependsOnMethods = { "testReplace" })
    public void testAppendOrAdd() throws Exception {
        for (int i = 0; i < loops; i++) {
            assertTrue(appendOrAdd(i, evCache));
        }
    }

    /** Re-bootstraps the injector and rebuilds the client so property changes take effect. */
    private void refreshEVCache() {
        setupEnv();
        testEVCache();
    }

    /**
     * Exercises the full CRUD cycle across every hashing configuration: no hashing,
     * app-level hashing, auto-hashing of oversized keys (plus its negative case), and
     * hashing configured per server group.
     */
    @Test(dependsOnMethods = {"testAppendOrAdd"})
    public void functionalTestsWithAppLevelAndASGLevelHashingScenarios() throws Exception {
        refreshEVCache();
        // no hashing
        assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
        doFunctionalTests(false);
        // hashing at app level
        propertiesToSet.put(appName + ".hash.key", "true");
        refreshEVCache();
        assertTrue(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
        doFunctionalTests(true);
        propertiesToSet.remove(appName + ".hash.key");
        // hashing at app level due to auto hashing as a consequence of a large key
        propertiesToSet.put(appName + ".auto.hash.keys", "true");
        refreshEVCache();
        assertTrue(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".auto.hash.keys", Boolean.class).orElse(false).get());
        assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
        testWithLargeKey();
        // negative scenario
        propertiesToSet.remove(appName + ".auto.hash.keys");
        refreshEVCache();
        assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".auto.hash.keys", Boolean.class).orElse(false).get());
        assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
        assertThrows(IllegalArgumentException.class, () -> {
            testWithLargeKey();
        });
        // hashing at app level by choice AND different hashing at each asg
        Map<String, KeyHasher.HashingAlgorithm> hashingAlgorithmsByServerGroup = new HashMap<>();
        propertiesToSet.put(appName + ".hash.key", "true");
        refreshEVCache();
        assertTrue(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
        // get server group names, to be used to configure the ASG level hashing properties
        Map<ServerGroup, List<EVCacheClient>> clientsByServerGroup = manager.getEVCacheClientPool(appName).getAllInstancesByServerGroup();
        int i = 0;
        KeyHasher.HashingAlgorithm hashingAlgorithm = KeyHasher.HashingAlgorithm.values()[0];
        for (ServerGroup serverGroup : clientsByServerGroup.keySet()) {
            // use below logic to have different hashing per asg once the code supports. Currently the code caches the value that it uses for all the asgs
            // KeyHasher.HashingAlgorithm hashingAlgorithm = KeyHasher.HashingAlgorithm.values()[i++ % KeyHasher.HashingAlgorithm.values().length];
            hashingAlgorithmsByServerGroup.put(serverGroup.getName(), hashingAlgorithm);
            propertiesToSet.put(serverGroup.getName() + ".hash.key", "true");
            propertiesToSet.put(serverGroup.getName() + ".hash.algo", hashingAlgorithm.name());
        }
        refreshEVCache();
        clientsByServerGroup = manager.getEVCacheClientPool(appName).getAllInstancesByServerGroup();
        // validate hashing properties of asgs
        for (ServerGroup serverGroup : clientsByServerGroup.keySet()) {
            assertEquals(clientsByServerGroup.get(serverGroup).get(0).getHashingAlgorithm(), hashingAlgorithmsByServerGroup.get(serverGroup.getName()));
        }
        doFunctionalTests(true);
        for (ServerGroup serverGroup : clientsByServerGroup.keySet()) {
            propertiesToSet.remove(serverGroup.getName());
        }
    }

    /** Set/get round-trip with a key far beyond the memcached limit (relies on key hashing). */
    private void testWithLargeKey() throws Exception {
        StringBuilder sb = new StringBuilder();
        for (int i= 0; i < 100; i++) {
            sb.append(Long.toString(System.currentTimeMillis()));
        }
        String key = sb.toString();
        String value = UUID.randomUUID().toString();
        // set
        EVCacheLatch latch = evCache.set(key, value, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        // get
        assertEquals(evCache.get(key), value);
    }

    /**
     * Full CRUD + append/incr/decr cycle. When hashing is enabled, append-style
     * operations are expected to throw EVCacheException (appends are unsupported on
     * hashed keys); otherwise the appended values must be readable.
     */
    private void doFunctionalTests(boolean isHashingEnabled) throws Exception {
        String key1 = Long.toString(System.currentTimeMillis());
        String value1 = UUID.randomUUID().toString();
        // set
        EVCacheLatch latch = evCache.set(key1, value1, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        // get
        assertEquals(evCache.get(key1), value1);
        // replace
        value1 = UUID.randomUUID().toString();
        latch = evCache.replace(key1, value1, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        // get
        assertEquals(evCache.get(key1), value1);
        // add a key
        String key2 = Long.toString(System.currentTimeMillis());
        String value2 = UUID.randomUUID().toString();
        latch = evCache.add(key2, value2, null, 1000, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        // get
        assertEquals(evCache.get(key2), value2);
        // appendoradd - append case
        String value3 = UUID.randomUUID().toString();
        if (isHashingEnabled) {
            assertThrows(EVCacheException.class, () -> {
                evCache.appendOrAdd(key2, value3, null, 1000, EVCacheLatch.Policy.ALL);
            });
        } else {
            latch = evCache.appendOrAdd(key2, value3, null, 1000, EVCacheLatch.Policy.ALL);
            latch.await(3000, TimeUnit.MILLISECONDS);
            assertEquals(evCache.get(key2), value2 + value3);
        }
        // appendoradd - add case
        String key3 = Long.toString(System.currentTimeMillis());
        String value4 = UUID.randomUUID().toString();
        if (isHashingEnabled) {
            assertThrows(EVCacheException.class, () -> {
                evCache.appendOrAdd(key3, value4, null, 1000, EVCacheLatch.Policy.ALL);
            });
        } else {
            latch = evCache.appendOrAdd(key3, value4, null, 1000, EVCacheLatch.Policy.ALL);
            latch.await(3000, TimeUnit.MILLISECONDS);
            // get
            assertEquals(evCache.get(key3), value4);
        }
        // append
        String value5 = UUID.randomUUID().toString();
        if (isHashingEnabled) {
            assertThrows(EVCacheException.class, () -> {
                evCache.append(key3, value5, 1000);
            });
        } else {
            Future<Boolean> futures[] = evCache.append(key3, value5, 1000);
            for (Future future : futures) {
                assertTrue((Boolean) future.get());
            }
            // get
            assertEquals(evCache.get(key3), value4 + value5);
        }
        String key4 = Long.toString(System.currentTimeMillis());
        assertEquals(evCache.incr(key4, 1, 10, 1000), 10);
        assertEquals(evCache.incr(key4, 10, 10, 1000), 20);
        // decr
        String key5 = Long.toString(System.currentTimeMillis());
        assertEquals(evCache.decr(key5, 1, 10, 1000), 10);
        assertEquals(evCache.decr(key5, 20, 10, 1000), 0);
        // delete
        latch = evCache.delete(key1, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        latch = evCache.delete(key2, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        latch = evCache.delete(key3, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        latch = evCache.delete(key4, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        latch = evCache.delete(key5, EVCacheLatch.Policy.ALL);
        latch.await(1000, TimeUnit.MILLISECONDS);
        // test expiry
        String key6 = Long.toString(System.currentTimeMillis());
        assertEquals(evCache.incr(key6, 1, 10, 5), 10);
        Thread.sleep(5000);
        assertNull(evCache.get(key6));
        assertNull(evCache.get(key1));
        assertNull(evCache.get(key2));
        assertNull(evCache.get(key3));
        assertNull(evCache.get(key4));
        assertNull(evCache.get(key5));
    }

    /**
     * Soak-style driver used from main(): bootstraps the environment, then loops the
     * individual test methods loops*1000 times, logging (not failing on) errors.
     * Exits the JVM when done.
     */
    public void testAll() {
        try {
            setupEnv();
            testEVCache();
            testDelete();
            testAdd();
            Thread.sleep(500);
            // testInsertBinary();
            testInsert();
            int i = 0;
            while (i++ < loops*1000) {
                try {
                    testInsert();
                    testGet();
                    testGetAndTouch();
                    testBulk();
                    testBulkAndTouch();
                    testGetObservable();
                    testGetAndTouchObservable();
                    waitForCallbacks();
                    testAppendOrAdd();
                    testTouch();
                    testDelete();
                    testInsert();
                    if(i % 2 == 0) testDelete();
                    testAdd();
                    Thread.sleep(100);
                } catch (Throwable e) {
                    log.error(e.getMessage(), e);
                }
            }
            if (log.isDebugEnabled()) log.debug("All Done!!!. Will exit.");
            System.exit(0);
        } catch (Exception e) {
            e.printStackTrace();
            log.error(e.getMessage(), e);
        }
    }

    /** EVCacheGetOperationListener callback: logs the completed get's key and value. */
    public void onComplete(EVCacheOperationFuture<String> future) throws Exception {
        if (log.isDebugEnabled()) log.debug("getl : key : " + future.getKey() + ", val = " + future.get());
    }
}
4,073
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/EVCacheModule.java
package com.netflix.evcache;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;

import com.google.inject.*;
import com.netflix.archaius.api.annotations.ConfigurationSource;
import com.netflix.evcache.connection.DIConnectionModule;
import com.netflix.evcache.connection.IConnectionBuilder;
import com.netflix.evcache.event.hotkey.HotKeyListener;
import com.netflix.evcache.event.throttle.ThrottleListener;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheNodeList;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.evcache.pool.eureka.DIEVCacheNodeListProvider;
import com.netflix.evcache.version.VersionTracker;

/**
 * Guice module wiring the EVCache client stack: Eureka-backed node discovery,
 * the client pool manager, hot-key/throttle listeners and version tracking.
 * Also drives pool startup/shutdown via lifecycle annotations. Equal to any
 * other EVCacheModule instance so Guice treats repeated installs as one module.
 */
@Singleton
@SuppressWarnings("deprecation")
public class EVCacheModule extends AbstractModule {

    public EVCacheModule() {
    }

    /**
     * Eager config loader: installs the default connection module on the parent
     * module if no IConnectionBuilder binding was supplied elsewhere, and pulls in
     * the "evcache" configuration source.
     */
    @Singleton
    @ConfigurationSource("evcache")
    public static class EVCacheModuleConfigLoader {
        @Inject
        public EVCacheModuleConfigLoader(Injector injector, EVCacheModule module) {
            if(injector.getExistingBinding(Key.get(IConnectionBuilder.class)) == null) {
                module.install(new DIConnectionModule());
            }
        }
    }

    @Override
    protected void configure() {
        // Make sure connection factory provider Module is initialized in your Module when you init EVCacheModule
        bind(EVCacheModuleConfigLoader.class).asEagerSingleton();
        bind(EVCacheNodeList.class).toProvider(DIEVCacheNodeListProvider.class);
        bind(EVCacheClientPoolManager.class).asEagerSingleton();
        bind(HotKeyListener.class).asEagerSingleton();
        bind(ThrottleListener.class).asEagerSingleton();
        bind(VersionTracker.class).asEagerSingleton();
        // Static injection so non-injected code paths still see configured instances.
        requestStaticInjection(EVCacheModuleConfigLoader.class);
        requestStaticInjection(EVCacheConfig.class);
    }

    // Injected when this module instance itself is managed by the injector;
    // null when the module was only used to configure bindings.
    @Inject
    EVCacheClientPoolManager manager;

    /** Starts the client pools, falling back to the singleton manager when not injected. */
    @PostConstruct
    public void init() {
        if(manager != null) {
            manager.initAtStartup();
        } else {
            EVCacheClientPoolManager.getInstance().initAtStartup();
        }
    }

    /** Shuts the client pools down, falling back to the singleton manager when not injected. */
    @PreDestroy
    public void shutdown() {
        if(manager != null) {
            manager.shutdown();
        } else {
            EVCacheClientPoolManager.getInstance().shutdown();
        }
    }

    // All EVCacheModule instances are interchangeable — dedupes repeated installs.
    @Override
    public int hashCode() {
        return getClass().hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        return (obj != null) && (obj.getClass() == getClass());
    }
}
4,074
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/connection/DIConnectionFactoryBuilderProvider.java
package com.netflix.evcache.connection; import com.netflix.archaius.api.Property; import com.netflix.archaius.api.PropertyRepository; import com.netflix.discovery.EurekaClient; import com.netflix.evcache.pool.EVCacheClient; import com.netflix.evcache.util.EVCacheConfig; import net.spy.memcached.ConnectionFactory; import javax.inject.Inject; import javax.inject.Provider; public class DIConnectionFactoryBuilderProvider extends ConnectionFactoryBuilder implements Provider<IConnectionBuilder> { private final EurekaClient eurekaClient; private final PropertyRepository props; @Inject public DIConnectionFactoryBuilderProvider(EurekaClient eurekaClient, PropertyRepository props) { this.eurekaClient = eurekaClient; this.props = props; } @Override public ConnectionFactoryBuilder get() { return this; } public int getMaxQueueLength(String appName) { return props.get(appName + ".max.queue.length", Integer.class).orElse(16384).get(); } public int getOPQueueMaxBlockTime(String appName) { return props.get(appName + ".operation.QueueMaxBlockTime", Integer.class).orElse(10).get(); } public Property<Integer> getOperationTimeout(String appName) { return props.get(appName + ".operation.timeout", Integer.class).orElse(2500); } public boolean useBinaryProtocol() { return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.use.binary.protocol", Boolean.class).orElse(true).get(); } public EurekaClient getEurekaClient() { return eurekaClient; } public PropertyRepository getProps() { return props; } @Override public ConnectionFactory getConnectionFactory(EVCacheClient client) { final String appName = client.getAppName(); if(useBinaryProtocol()) return new DIConnectionFactory(client, eurekaClient, getMaxQueueLength(appName), getOperationTimeout(appName), getOPQueueMaxBlockTime(appName)); else return new DIAsciiConnectionFactory(client, eurekaClient, getMaxQueueLength(appName), getOperationTimeout(appName), getOPQueueMaxBlockTime(appName)); } }
4,075
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/connection/DIConnectionModule.java
package com.netflix.evcache.connection;

import com.google.inject.AbstractModule;
import com.google.inject.Singleton;

/**
 * Guice module binding {@link IConnectionBuilder} to the DI-aware connection
 * factory builder provider. Instances compare equal by class so Guice dedupes
 * repeated installs of this module.
 */
@Singleton
public class DIConnectionModule extends AbstractModule {

    public DIConnectionModule() {
    }

    @Override
    protected void configure() {
        bind(IConnectionBuilder.class).toProvider(DIConnectionFactoryBuilderProvider.class);
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        return obj.getClass() == getClass();
    }
}
4,076
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/connection/DIAsciiConnectionFactory.java
package com.netflix.evcache.connection;

import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.archaius.api.Property;
import com.netflix.discovery.EurekaClient;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.DIEVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheNodeLocator;

import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;

/**
 * ASCII-protocol connection factory that tags the client with its protocol and
 * builds a ketama node locator backed by Eureka-resolved node keys.
 */
public class DIAsciiConnectionFactory extends BaseAsciiConnectionFactory {

    private static Logger log = LoggerFactory.getLogger(DIAsciiConnectionFactory.class);

    private final EurekaClient eurekaClient;

    DIAsciiConnectionFactory(EVCacheClient client, EurekaClient eurekaClient, int len,
                             Property<Integer> operationTimeout, long opMaxBlockTime) {
        super(client, len, operationTimeout, opMaxBlockTime);
        client.addTag(EVCacheMetricsFactory.CONNECTION, "ASCII");
        this.eurekaClient = eurekaClient;
        if (log.isInfoEnabled()) {
            log.info("Using ASCII Connection Factory!!!");
        }
    }

    /** Creates a KETAMA-hash locator whose node keys are resolved through Eureka. */
    @Override
    public NodeLocator createLocator(List<MemcachedNode> list) {
        this.locator = new EVCacheNodeLocator(client, list, DefaultHashAlgorithm.KETAMA_HASH,
                new DIEVCacheKetamaNodeLocatorConfiguration(client, eurekaClient));
        return locator;
    }
}
4,077
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/connection/DIConnectionFactory.java
package com.netflix.evcache.connection;

import com.netflix.archaius.api.Property;
import com.netflix.discovery.EurekaClient;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.DIEVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheNodeLocator;

import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;

import java.util.List;

/**
 * Binary-protocol connection factory that tags the client with its protocol and
 * builds a ketama node locator backed by Eureka-resolved node keys.
 */
public class DIConnectionFactory extends BaseConnectionFactory {

    private final EurekaClient eurekaClient;

    DIConnectionFactory(EVCacheClient client, EurekaClient eurekaClient, int len,
                        Property<Integer> operationTimeout, long opMaxBlockTime) {
        super(client, len, operationTimeout, opMaxBlockTime);
        client.addTag(EVCacheMetricsFactory.CONNECTION, "BINARY");
        this.eurekaClient = eurekaClient;
    }

    /** Creates a KETAMA-hash locator whose node keys are resolved through Eureka. */
    @Override
    public NodeLocator createLocator(List<MemcachedNode> list) {
        this.locator = new EVCacheNodeLocator(client, list, DefaultHashAlgorithm.KETAMA_HASH,
                new DIEVCacheKetamaNodeLocatorConfiguration(client, eurekaClient));
        return locator;
    }
}
4,078
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/version/VersionTracker.java
package com.netflix.evcache.version;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;

import javax.inject.Inject;
import javax.inject.Singleton;

/**
 * Periodically publishes a gauge carrying the evcache-client jar name and version.
 * The task schedules itself on the pool manager's scheduled executor and
 * re-registers every 30 seconds to keep the gauge alive.
 */
@Singleton
public class VersionTracker implements Runnable {
    private static final Logger log = LoggerFactory.getLogger(VersionTracker.class);

    // Lazily created on the first run; carries the version/jarName tags.
    private AtomicLong versionGauge;
    private EVCacheClientPoolManager poolManager;

    @Inject
    public VersionTracker(EVCacheClientPoolManager poolManager) {
        this.poolManager = poolManager;
        poolManager.getEVCacheScheduledExecutor().schedule(this, 30, TimeUnit.SECONDS);
    }

    @Override
    public void run() {
        // init the version information
        if (versionGauge == null) {
            final Package pkg = this.getClass().getPackage();
            final String fullVersion = (pkg.getImplementationVersion() != null)
                    ? pkg.getImplementationVersion() : "unknown";
            // Bug fix: this branch previously tested getImplementationVersion() (copy-paste),
            // so jarName could end up null when a version existed but no title did.
            final String jarName = (pkg.getImplementationTitle() != null)
                    ? pkg.getImplementationTitle() : "unknown";
            if (log.isInfoEnabled()) log.info("fullVersion : " + fullVersion + "; jarName : " + jarName);

            final List<Tag> tagList = new ArrayList<Tag>(3);
            tagList.add(new BasicTag("version", fullVersion));
            tagList.add(new BasicTag("jarName", jarName));
            versionGauge = EVCacheMetricsFactory.getInstance().getLongGauge("evcache-client", tagList);
        }
        versionGauge.set(1L);
        // Re-register so the gauge keeps being refreshed every 30 seconds.
        poolManager.getEVCacheScheduledExecutor().schedule(this, 30, TimeUnit.SECONDS);
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        return (obj != null) && (obj.getClass() == getClass());
    }
}
4,079
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/pool/DIEVCacheKetamaNodeLocatorConfiguration.java
package com.netflix.evcache.pool;

import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.shared.Application;
import net.spy.memcached.MemcachedNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.List;

/**
 * Ketama locator configuration that derives a node's hash-ring key from Eureka
 * registration data when available, falling back to the raw socket address.
 * Keys are cached per node in {@code socketAddresses}.
 */
public class DIEVCacheKetamaNodeLocatorConfiguration extends EVCacheKetamaNodeLocatorConfiguration {

    private static final Logger log = LoggerFactory.getLogger(DIEVCacheKetamaNodeLocatorConfiguration.class);

    private final EurekaClient eurekaClient;

    public DIEVCacheKetamaNodeLocatorConfiguration(EVCacheClient client, EurekaClient eurekaClient) {
        super(client);
        this.eurekaClient = eurekaClient;
    }

    /**
     * Returns the socket address of a given MemcachedNode.
     *
     * @param node - The MemcachedNode which we're interested in
     * @return The socket address of the given node format is of the following
     *         format "publicHostname/privateIp:port" (ex - ec2-174-129-159-31.compute-1.amazonaws.com/10.125.47.114:11211)
     */
    @Override
    public String getKeyForNode(MemcachedNode node, int repetition) {
        String result = socketAddresses.get(node);
        if (result == null) {
            final SocketAddress socketAddress = node.getSocketAddress();
            if (socketAddress instanceof InetSocketAddress) {
                final InetSocketAddress isa = (InetSocketAddress) socketAddress;
                if (eurekaClient != null) {
                    final Application app = eurekaClient.getApplication(client.getAppName());
                    if (app != null) {
                        final List<InstanceInfo> instances = app.getInstances();
                        for (InstanceInfo info : instances) {
                            final String hostName = info.getHostName();
                            if (hostName.equalsIgnoreCase(isa.getHostName())) {
                                final String ip = info.getIPAddr();
                                // NOTE(review): port is hardcoded to 11211 here (as in the original) — confirm.
                                result = hostName + '/' + ip + ":11211";
                                break;
                            }
                        }
                    }
                }
                // Bug fix: previously, when the Eureka application existed but contained no
                // instance matching this host, result stayed null, producing "null-<rep>" ring
                // keys and caching null. Fall back to the plain address form in every
                // unresolved case (eureka disabled, app missing, or host not found).
                if (result == null) {
                    result = isa.getHostName() + '/' + isa.getAddress().getHostAddress() + ":11211";
                }
            } else {
                result = String.valueOf(socketAddress);
                if (result.startsWith("/")) {
                    result = result.substring(1);
                }
            }
            socketAddresses.put(node, result);
        }
        if (log.isDebugEnabled()) log.debug("Returning : " + (result + "-" + repetition));
        return result + "-" + repetition;
    }

    @Override
    public String toString() {
        return "DIEVCacheKetamaNodeLocatorConfiguration [" + super.toString() + ", EurekaClient=" + eurekaClient + "]";
    }
}
4,080
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/pool
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/pool/eureka/EurekaNodeListProvider.java
package com.netflix.evcache.pool.eureka;

import com.google.common.net.InetAddresses;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.shared.Application;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheNodeList;
import com.netflix.evcache.pool.EVCacheServerGroupConfig;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * {@link EVCacheNodeList} implementation that discovers memcached server instances
 * for an EVCache app via Eureka, grouping usable instances into server groups
 * keyed by (availability zone, ASG name).
 */
public class EurekaNodeListProvider implements EVCacheNodeList {

    private static final Logger log = LoggerFactory.getLogger(EurekaNodeListProvider.class);
    private final EurekaClient _eurekaClient;
    private PropertyRepository props;
    private final ApplicationInfoManager applicationInfoManager;
    // Lazily initialized on first discovery; holds local IPs / public hostnames to skip.
    @SuppressWarnings("rawtypes") // Archaius2 PropertyRepository does not support ParameterizedTypes
    private Property<Set> ignoreHosts = null;

    public EurekaNodeListProvider(ApplicationInfoManager applicationInfoManager, EurekaClient eurekaClient, PropertyRepository props) {
        this.applicationInfoManager = applicationInfoManager;
        this._eurekaClient = eurekaClient;
        this.props = props;
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.evcache.pool.EVCacheNodeList#discoverInstances()
     */
    @Override
    public Map<ServerGroup, EVCacheServerGroupConfig> discoverInstances(String _appName) throws IOException {
        final Property<Boolean> ignoreAppEurekaStatus = props.get("evcache.ignoreAppEurekaStatus", Boolean.class).orElse(false);
        if (ignoreAppEurekaStatus.get()) log.info("Not going to consider the eureka status of the application, to initialize evcache client.");
        // Bail out early when this application itself is marked DOWN in Eureka (unless overridden).
        if (!ignoreAppEurekaStatus.get() && (applicationInfoManager.getInfo().getStatus() == InstanceStatus.DOWN)) {
            log.info("Not initializing evcache client as application eureka status is DOWN. " + "One can override this behavior by setting evcache.ignoreAppEurekaStatus property to true, scoped to your application.");
            return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
        }
        /* Get a list of EVCACHE instances from the DiscoveryManager */
        final Application app = _eurekaClient.getApplication(_appName);
        if (app == null) return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
        final List<InstanceInfo> appInstances = app.getInstances();
        final Map<ServerGroup, EVCacheServerGroupConfig> instancesSpecific = new HashMap<ServerGroup, EVCacheServerGroupConfig>();
        /* Iterate all the discovered instances to find usable ones */
        for (InstanceInfo iInfo : appInstances) {
            final DataCenterInfo dcInfo = iInfo.getDataCenterInfo();
            if (dcInfo == null) {
                if (log.isErrorEnabled()) log.error("Data Center Info is null for appName - " + _appName);
                continue;
            }
            /* Only AWS instances are usable; bypass all others */
            if (DataCenterInfo.Name.Amazon != dcInfo.getName() || !(dcInfo instanceof AmazonInfo)) {
                log.error("This is not an AWSDataCenter. You will not be able to use Discovery Nodelist Provider. Cannot proceed. " + "DataCenterInfo : {}; appName - {}. Please use SimpleNodeList provider and specify the server groups manually.", dcInfo, _appName);
                continue;
            }
            final AmazonInfo amznInfo = (AmazonInfo) dcInfo; // We checked above if this instance is Amazon so no need to do a instanceof check
            final String zone = amznInfo.get(AmazonInfo.MetaDataKey.availabilityZone);
            if (zone == null) {
                // Count (via metrics) and skip instances with no availability zone.
                final List<Tag> tagList = new ArrayList<Tag>(3);
                EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, _appName);
                tagList.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.NULL_ZONE));
                EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.CONFIG, tagList);
                continue;
            }
            final String asgName = iInfo.getASGName();
            if (asgName == null) {
                // Count (via metrics) and skip instances with no ASG (server group).
                final List<Tag> tagList = new ArrayList<Tag>(3);
                EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, _appName);
                tagList.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.NULL_SERVERGROUP));
                EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.CONFIG, tagList);
                continue;
            }
            // An entire ASG can be disabled via the "<asg>.enabled" property.
            final Property<Boolean> asgEnabled = props.get(asgName + ".enabled", Boolean.class).orElse(true);
            if (!asgEnabled.get()) {
                if (log.isDebugEnabled()) log.debug("ASG " + asgName + " is disabled so ignoring it");
                continue;
            }
            // Port comes from instance metadata, falling back to the pool default; when
            // secure mode is on, the secure port (and its default) is used instead.
            final Map<String, String> metaInfo = iInfo.getMetadata();
            final int evcachePort = Integer.parseInt((metaInfo != null && metaInfo.containsKey("evcache.port")) ? metaInfo.get("evcache.port") : EVCacheClientPool.DEFAULT_PORT);
            int port = evcachePort;
            final Property<Boolean> isSecure = props.get(asgName + ".use.secure", Boolean.class)
                    .orElseGet(_appName + ".use.secure")
                    .orElseGet("evcache.use.secure")
                    .orElse(false);
            if (isSecure.get()) {
                port = Integer.parseInt((metaInfo != null && metaInfo.containsKey("evcache.secure.port")) ? metaInfo.get("evcache.secure.port") : EVCacheClientPool.DEFAULT_SECURE_PORT);
            }
            final ServerGroup serverGroup = new ServerGroup(zone, asgName);
            final Set<InetSocketAddress> instances;
            final EVCacheServerGroupConfig config;
            if (instancesSpecific.containsKey(serverGroup)) {
                config = instancesSpecific.get(serverGroup);
                instances = config.getInetSocketAddress();
            } else {
                instances = new HashSet<InetSocketAddress>();
                config = new EVCacheServerGroupConfig(serverGroup, instances);
                instancesSpecific.put(serverGroup, config);
                //EVCacheMetricsFactory.getInstance().getRegistry().gauge(EVCacheMetricsFactory.getInstance().getRegistry().createId(_appName + "-port", "ServerGroup", asgName, "APP", _appName), Long.valueOf(port));
            }
            /* Don't try to use downed instances */
            final InstanceStatus status = iInfo.getStatus();
            if (status == null || InstanceStatus.OUT_OF_SERVICE == status || InstanceStatus.DOWN == status) {
                if (log.isDebugEnabled()) log.debug("The Status of the instance in Discovery is " + status + ". App Name : " + _appName + "; Zone : " + zone + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
                continue;
            }
            // Determine whether *this* client runs in the cloud (EC2 classic or VPC);
            // that decides which address of the server instance is considered reachable.
            final InstanceInfo myInfo = applicationInfoManager.getInfo();
            final DataCenterInfo myDC = myInfo.getDataCenterInfo();
            final AmazonInfo myAmznDC = (myDC instanceof AmazonInfo) ? (AmazonInfo) myDC : null;
            final String myInstanceId = myInfo.getInstanceId();
            final String myIp = myInfo.getIPAddr();
            final String myPublicHostName = (myAmznDC != null) ? myAmznDC.get(AmazonInfo.MetaDataKey.publicHostname) : null;
            boolean isInCloud = false;
            if (myPublicHostName != null) {
                isInCloud = myPublicHostName.startsWith("ec2");
            }
            if (!isInCloud) {
                if (myAmznDC != null && myAmznDC.get(AmazonInfo.MetaDataKey.vpcId) != null) {
                    isInCloud = true;
                } else {
                    // NOTE(review): heuristic — instance-id equal to IP is treated as "not in cloud";
                    // this branch only re-asserts the existing false value. Confirm intent.
                    if (myIp.equals(myInstanceId)) {
                        isInCloud = false;
                    }
                }
            }
            final String myZone = (myAmznDC != null) ? myAmznDC.get(AmazonInfo.MetaDataKey.availabilityZone) : null;
            final String myRegion = (myZone != null) ? myZone.substring(0, myZone.length() - 1) : null;
            final String region = (zone != null) ? zone.substring(0, zone.length() - 1) : null;
            final String host = amznInfo.get(AmazonInfo.MetaDataKey.publicHostname);
            InetSocketAddress address = null;
            final String vpcId = amznInfo.get(AmazonInfo.MetaDataKey.vpcId);
            final String localIp = amznInfo.get(AmazonInfo.MetaDataKey.localIpv4);
            if (log.isDebugEnabled()) log.debug("myZone - " + myZone + "; zone : " + zone + "; myRegion : " + myRegion + "; region : " + region + "; host : " + host + "; vpcId : " + vpcId);
            // Skip hosts explicitly listed in "<app>.ignore.hosts" (matched by local IP or public hostname).
            if (ignoreHosts == null) ignoreHosts = props.get(_appName + ".ignore.hosts", Set.class).orElse(Collections.emptySet());
            if (localIp != null && ignoreHosts.get().contains(localIp)) continue;
            if (host != null && ignoreHosts.get().contains(host)) continue;
            if (vpcId != null) {
                // VPC instance: connect via its private (local) IPv4 address.
                final InetAddress add = InetAddresses.forString(localIp);
                final InetAddress inetAddress = InetAddress.getByAddress(localIp, add.getAddress());
                address = new InetSocketAddress(inetAddress, port);
                if (log.isDebugEnabled()) log.debug("VPC : localIp - " + localIp + " ; add : " + add + "; inetAddress : " + inetAddress + "; address - " + address + "; App Name : " + _appName + "; Zone : " + zone + "; myZone - " + myZone + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
            } else {
                if (host != null && host.startsWith("ec2")) {
                    // EC2 public hostname present: prefer pairing it with the local IP to avoid a DNS lookup.
                    final InetAddress inetAddress = (localIp != null) ? InetAddress.getByAddress(host, InetAddresses.forString(localIp).getAddress()) : InetAddress.getByName(host);
                    address = new InetSocketAddress(inetAddress, port);
                    if (log.isDebugEnabled()) log.debug("myZone - " + myZone + ". host : " + host + "; inetAddress : " + inetAddress + "; address - " + address + "; App Name : " + _appName + "; Zone : " + zone + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
                } else {
                    // No ec2 public hostname: in-cloud clients use the local IP, others the public IP.
                    final String ipToUse = (isInCloud) ? localIp : amznInfo.get(AmazonInfo.MetaDataKey.publicIpv4);
                    final InetAddress add = InetAddresses.forString(ipToUse);
                    final InetAddress inetAddress = InetAddress.getByAddress(ipToUse, add.getAddress());
                    address = new InetSocketAddress(inetAddress, port);
                    if (log.isDebugEnabled()) log.debug("CLASSIC : IPToUse - " + ipToUse + " ; add : " + add + "; inetAddress : " + inetAddress + "; address - " + address + "; App Name : " + _appName + "; Zone : " + zone + "; myZone - " + myZone + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
                }
            }
            instances.add(address);
        }
        return instancesSpecific;
    }
}
4,081
0
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/pool
Create_ds/EVCache/evcache-client/src/main/java/com/netflix/evcache/pool/eureka/DIEVCacheNodeListProvider.java
package com.netflix.evcache.pool.eureka; import javax.inject.Inject; import javax.inject.Provider; import com.netflix.archaius.api.PropertyRepository; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.netflix.appinfo.ApplicationInfoManager; import com.netflix.discovery.EurekaClient; import com.netflix.evcache.pool.EVCacheNodeList; import com.netflix.evcache.pool.SimpleNodeListProvider; public class DIEVCacheNodeListProvider implements Provider<EVCacheNodeList> { private static final Logger log = LoggerFactory.getLogger(DIEVCacheNodeListProvider.class); private final EurekaClient eurekaClient; private PropertyRepository props; private final ApplicationInfoManager applicationInfoManager; @Inject public DIEVCacheNodeListProvider(ApplicationInfoManager applicationInfoManager, EurekaClient eurekaClient, PropertyRepository props) { this.applicationInfoManager = applicationInfoManager; this.eurekaClient = eurekaClient; this.props = props; } @Override public EVCacheNodeList get() { final EVCacheNodeList provider; if (props.get("evcache.use.simple.node.list.provider", Boolean.class).orElse(false).get()) { provider = new SimpleNodeListProvider(); } else { provider = new EurekaNodeListProvider(applicationInfoManager, eurekaClient, props); } if(log.isDebugEnabled()) log.debug("EVCache Node List Provider : " + provider); return provider; } }
4,082
0
Create_ds/neptune-export/src/test/java/org/apache/tinkerpop/gremlin
Create_ds/neptune-export/src/test/java/org/apache/tinkerpop/gremlin/driver/LBAwareSigV4WebSocketChannelizerTest.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
    http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/

package org.apache.tinkerpop.gremlin.driver;

import com.amazonaws.services.neptune.auth.HandshakeRequestConfig;
import com.amazonaws.services.neptune.auth.LBAwareAwsSigV4ClientHandshaker;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.embedded.EmbeddedChannel;
import org.apache.tinkerpop.gremlin.driver.handler.WebSocketClientHandler;
import org.junit.Test;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collections;

import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Verifies that the channelizer installs a SigV4-signing handshaker into the
 * websocket handler of the channel pipeline.
 */
public class LBAwareSigV4WebSocketChannelizerTest {

    @Test
    public void configureShouldAddSigV4HandshakerToPipeline() throws URISyntaxException {
        System.setProperty("SERVICE_REGION", "us-west-2");

        final ChannelPipeline pipeline = new EmbeddedChannel().pipeline();
        final LBAwareSigV4WebSocketChannelizer channelizer = new LBAwareSigV4WebSocketChannelizer();

        // Stub out the connection/cluster plumbing the channelizer reads during init.
        final Connection connection = mock(Connection.class);
        final Cluster cluster = mock(Cluster.class);
        when(connection.getCluster()).thenReturn(cluster);
        when(connection.getUri()).thenReturn(new URI("ws:localhost"));
        when(cluster.connectionPoolSettings()).thenReturn(mock(Settings.ConnectionPoolSettings.class));
        when(cluster.authProperties()).thenReturn(new AuthProperties().with(
                AuthProperties.Property.JAAS_ENTRY,
                new HandshakeRequestConfig(Collections.emptyList(), 8182, false).value()));

        channelizer.init(connection);
        channelizer.configure(pipeline);

        final ChannelHandler handler = pipeline.get(LBAwareSigV4WebSocketChannelizer.WEB_SOCKET_HANDLER);
        assertTrue(handler instanceof WebSocketClientHandler);
        assertTrue(((WebSocketClientHandler) handler).handshaker() instanceof LBAwareAwsSigV4ClientHandshaker);
    }
}
4,083
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/AbstractExportIntegrationTest.java
package com.amazonaws.services.neptune;

import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Base class for export integration tests. Requires a live Neptune endpoint in the
 * NEPTUNE_ENDPOINT environment variable and provides helpers to compare an export
 * directory against checked-in expected results.
 */
public abstract class AbstractExportIntegrationTest {

    protected static String neptuneEndpoint;
    protected File outputDir;

    @Rule
    public TemporaryFolder tempFolder = new TemporaryFolder();

    @BeforeClass
    public static void setupClass() {
        neptuneEndpoint = System.getenv("NEPTUNE_ENDPOINT");
        assertNotNull("endpoint must be provided through \"NEPTUNE_ENDPOINT\" environment variable", neptuneEndpoint);
        fillDbWithTestData(neptuneEndpoint);
    }

    @Before
    public void setup() throws IOException {
        outputDir = tempFolder.newFolder();
    }

    private static void fillDbWithTestData(final String neptuneEndpoint) {
        //TODO:: For now assume that correct data is pre-loaded into DB.
        // Cluster cluster = Cluster.build(neptuneEndpoint).enableSsl(true).create();
        // GraphTraversalSource g = traversal().withRemote(DriverRemoteConnection.using(cluster, "g"));
    }

    /**
     * Asserts that the actual export directory matches the expected one: stats.json,
     * config.json, and (when present in expected) the nodes/ and edges/ directories.
     */
    protected void assertEquivalentResults(final File expected, final File actual) {
        GraphSchema config;
        try {
            config = new JsonResource<GraphSchema, Boolean>(
                    "Config file",
                    new URI(expected.getPath() + "/config.json"),
                    GraphSchema.class).get();
        } catch (IOException | URISyntaxException e) { // both were rethrown identically — multi-catch
            throw new RuntimeException(e);
        }

        assertTrue("stats.json does not match expected results",
                areJsonContentsEqual(expected.listFiles((dir, name) -> name.equals("stats.json"))[0],
                        actual.listFiles((dir, name) -> name.equals("stats.json"))[0]));
        assertTrue("config.json does not match expected results",
                areJsonContentsEqual(expected.listFiles((dir, name) -> name.equals("config.json"))[0],
                        actual.listFiles((dir, name) -> name.equals("config.json"))[0]));
        if (expected.listFiles(((dir, name) -> name.equals("nodes"))).length >= 1) {
            assertTrue("nodes directory does not match expected results",
                    areDirContentsEquivalent(expected + "/nodes", actual + "/nodes", config));
        }
        if (expected.listFiles(((dir, name) -> name.equals("edges"))).length >= 1) {
            assertTrue("edges directory does not match expected results",
                    areDirContentsEquivalent(expected + "/edges", actual + "/edges", config));
        }
    }

    /** Compares two JSON files structurally (order-insensitive for object fields). */
    protected boolean areJsonContentsEqual(final File expected, final File actual) {
        final ObjectMapper mapper = new ObjectMapper();
        try {
            JsonNode expectedTree = mapper.readTree(expected);
            JsonNode actualTree = mapper.readTree(actual);
            return expectedTree.equals(actualTree);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Compares a nodes/ or edges/ directory label-by-label, using the labels declared
     * in the export config to locate the CSV files for each label.
     */
    protected boolean areDirContentsEquivalent(final String expectedPath, final String actualPath, final GraphSchema config) {
        final File expectedDir = new File(expectedPath);
        final File actualDir = new File(actualPath);
        assertTrue("Expected path to a directory", expectedDir.isDirectory() && actualDir.isDirectory());

        GraphElementSchemas schemas;
        if (expectedDir.getName().equals("nodes")) {
            if (!config.hasNodeSchemas()) {
                return true;
            }
            schemas = config.graphElementSchemasFor(GraphElementType.nodes);
        } else if (expectedDir.getName().equals("edges")) {
            if (!config.hasEdgeSchemas()) {
                return true;
            }
            schemas = config.graphElementSchemasFor(GraphElementType.edges);
        } else {
            throw new IllegalArgumentException("directory must end in either /nodes or /edges");
        }

        for (Label l : schemas.labels()) {
            String label = l.fullyQualifiedLabel();
            if (!areLabelledDirContentsEquivalent(expectedDir, actualDir, label)) {
                return false;
            }
        }
        return true;
    }

    /**
     * Compares the CSV records of all files for a given label in the two directories,
     * ignoring record order. Parentheses in labels are percent-encoded in file names.
     */
    protected boolean areLabelledDirContentsEquivalent(final File expectedDir, final File actualDir, final String label) {
        final String escapedLabel = label.replaceAll("\\(", "%28").replaceAll("\\)", "%29");
        final List<String> expectedNodes = readLabelledRecords(expectedDir, escapedLabel);
        final List<String> actualNodes = readLabelledRecords(actualDir, escapedLabel);
        return expectedNodes.containsAll(actualNodes) && actualNodes.containsAll(expectedNodes);
    }

    /** Reads all CSV records from files in {@code dir} whose names start with {@code labelPrefix}. */
    private static List<String> readLabelledRecords(final File dir, final String labelPrefix) {
        final List<String> records = new ArrayList<>();
        for (File file : dir.listFiles((d, name) -> name.startsWith(labelPrefix))) {
            // try-with-resources: the original leaked the CSVParser (it is Closeable).
            try (CSVParser parser = CSVParser.parse(file, StandardCharsets.UTF_8, CSVFormat.RFC4180)) {
                final Collection<String> list = parser.stream()
                        .map(csvRecord -> csvRecord.toList().toString())
                        .collect(Collectors.toList());
                records.addAll(list);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
        return records;
    }
}
4,084
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/CreatePgConfigIntegrationTest.java
package com.amazonaws.services.neptune;

import com.amazonaws.services.neptune.export.NeptuneExportRunner;
import org.junit.Test;

import java.io.File;

/**
 * Integration tests for the create-pg-config command, with and without Gremlin filters.
 */
public class CreatePgConfigIntegrationTest extends AbstractExportIntegrationTest {

    @Test
    public void testCreatePgConfig() {
        final File resultDir = runExport("create-pg-config", "-e", neptuneEndpoint, "-d", outputDir.getPath());
        assertEquivalentResults(new File("src/test/resources/IntegrationTest/testCreatePgConfig"), resultDir);
    }

    @Test
    public void testCreatePgConfigWithGremlinFilter() {
        final File resultDir = runExport("create-pg-config", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                "--gremlin-filter", "has(\"runways\", 2)");
        assertEquivalentResults(new File("src/test/resources/IntegrationTest/testCreatePgConfigWithGremlinFilter"), resultDir);
    }

    @Test
    public void testCreatePgConfigWithEdgeGremlinFilter() {
        final File resultDir = runExport("create-pg-config", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                "--gremlin-filter", "hasLabel(\"route\")");
        assertEquivalentResults(new File("src/test/resources/IntegrationTest/testCreatePgConfigWithEdgeGremlinFilter"), resultDir);
    }

    @Test
    public void testCreatePgConfigWithEdgeGremlinFilterAndEarlyGremlinFilter() {
        final File resultDir = runExport("create-pg-config", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                "--gremlin-filter", "hasLabel(\"route\")", "--filter-edges-early");
        assertEquivalentResults(new File("src/test/resources/IntegrationTest/testCreatePgConfigWithEdgeGremlinFilter"), resultDir);
    }

    /** Runs the export command and returns the single result directory it produces. */
    private File runExport(final String... command) {
        new NeptuneExportRunner(command).run();
        return outputDir.listFiles()[0];
    }
}
4,085
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/ExportPgFromQueriesIntegrationTest.java
package com.amazonaws.services.neptune;

import com.amazonaws.services.neptune.export.NeptuneExportRunner;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import static org.junit.Assert.assertTrue;

/**
 * Integration tests for the {@code export-pg-from-queries} command, covering
 * plain query export and structured output with type definitions.
 */
public class ExportPgFromQueriesIntegrationTest extends AbstractExportIntegrationTest {

    @Test
    public void testExportPgFromQueries() {
        final String[] command = {"export-pg-from-queries", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                "-q", "airport=g.V().hasLabel('airport').has('runways', gt(2)).project('code', 'runways', 'city', 'country').by('code').by('runways').by('city').by('country')"
        };

        final NeptuneExportRunner runner = new NeptuneExportRunner(command);
        runner.run();

        final File resultDir = outputDir.listFiles()[0];
        assertEquivalentResults(new File("src/test/resources/IntegrationTest/testExportPgFromQueries"), resultDir);
    }

    @Test
    public void testExportPgFromQueriesWithStructuredOutput() {
        final String[] command = {"export-pg-from-queries", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                "-q", "airport=g.V().union(hasLabel('airport'), outE()).elementMap()",
                "--include-type-definitions", "--structured-output"
        };

        final NeptuneExportRunner runner = new NeptuneExportRunner(command);
        runner.run();

        final File resultDir = outputDir.listFiles()[0];
        assertEquivalentStructuredOutput(
                new File("src/test/resources/IntegrationTest/testExportPgFromQueriesStructuredOutput"), resultDir);
    }

    @Test
    public void testExportPgFromQueriesWithStructuredOutputWithEdgeAndVertexLabels() {
        final String[] command = {"export-pg-from-queries", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                "-q", "airport=g.V().union(hasLabel('airport'), outE()).elementMap()",
                "--include-type-definitions", "--edge-label-strategy", "edgeAndVertexLabels", "--structured-output"
        };

        final NeptuneExportRunner runner = new NeptuneExportRunner(command);
        runner.run();

        final File resultDir = outputDir.listFiles()[0];
        assertEquivalentStructuredOutput(
                new File("src/test/resources/IntegrationTest/testExportPgFromQueriesStructuredOutput"), resultDir);
    }

    /**
     * Compares the exported queries.json and every per-query results directory
     * against the expected fixture directory.
     */
    @Override
    protected void assertEquivalentResults(final File expected, final File actual) {
        assertTrue("queries.json does not match expected results",
                areJsonContentsEqual(expected.listFiles((dir, name) -> name.equals("queries.json"))[0],
                        actual.listFiles((dir, name) -> name.equals("queries.json"))[0]));

        for (File expectedResultsDir : expected.listFiles((dir, name) -> name.equals("results"))[0].listFiles()) {
            assertTrue(expectedResultsDir.isDirectory());
            String dirName = expectedResultsDir.getName();
            assertTrue("results/" + dirName + " directory does not match expected results",
                    areLabelledDirContentsEquivalent(expectedResultsDir,
                            new File(actual + "/results/" + dirName), dirName));
        }
    }

    // Loads the expected GraphSchema config and compares the exported nodes directory
    // (if present in the fixture) against the actual output.
    private void assertEquivalentStructuredOutput(final File expected, final File actual) {
        final GraphSchema config;
        try {
            config = new JsonResource<GraphSchema, Boolean>(
                    "Config file",
                    new URI(expected.getPath() + "/config.json"),
                    GraphSchema.class).get();
        } catch (IOException | URISyntaxException e) {
            // Multi-catch: both failure modes are fatal to the test and wrapped identically.
            throw new RuntimeException(e);
        }

        if (expected.listFiles(((dir, name) -> name.equals("nodes"))).length >= 1) {
            assertTrue("nodes directory does not match expected results",
                    areDirContentsEquivalent(expected + "/nodes", actual + "/nodes", config));
        }
    }
}
4,086
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/ExportPgFromConfigIntegrationTest.java
package com.amazonaws.services.neptune;

import com.amazonaws.services.neptune.export.NeptuneExportRunner;
import org.junit.Test;

import java.io.File;

/**
 * Integration tests for the {@code export-pg-from-config} command, exporting a
 * property graph from a pre-built schema config file.
 */
public class ExportPgFromConfigIntegrationTest extends AbstractExportIntegrationTest {

    // Schema config fixture shared by all tests in this class.
    private static final String CONFIG_PATH =
            "src/test/resources/IntegrationTest/ExportPgFromConfigIntegrationTest/input/config.json";

    @Test
    public void testExportPgFromConfig() {
        runAndAssert(
                new String[]{"export-pg-from-config", "-e", neptuneEndpoint,
                        "-c", CONFIG_PATH, "-d", outputDir.getPath()},
                "src/test/resources/IntegrationTest/ExportPgFromConfigIntegrationTest/testExportPgFromConfig");
    }

    @Test
    public void testExportPgFromConfigWithGremlinFilter() {
        runAndAssert(
                new String[]{"export-pg-from-config", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                        "-c", CONFIG_PATH, "--gremlin-filter", "has(\"runways\", 2)"},
                "src/test/resources/IntegrationTest/testExportPgToCsvWithGremlinFilter");
    }

    @Test
    public void testExportEdgesFromConfigWithGremlinFilter() {
        runAndAssert(
                new String[]{"export-pg-from-config", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                        "-c", CONFIG_PATH, "--gremlin-filter", "hasLabel(\"route\")"},
                "src/test/resources/IntegrationTest/testExportEdgesToCsvWithGremlinFilter");
    }

    @Test
    public void testExportEdgesFromConfigWithGremlinFilterWithEarlyGremlinFilter() {
        // Early edge filtering should produce the same export as the plain edge filter.
        runAndAssert(
                new String[]{"export-pg-from-config", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                        "-c", CONFIG_PATH, "--gremlin-filter", "hasLabel(\"route\")", "--filter-edges-early"},
                "src/test/resources/IntegrationTest/testExportEdgesToCsvWithGremlinFilter");
    }

    // Runs the exporter and compares the single produced output directory
    // against the expected results directory.
    private void runAndAssert(String[] args, String expectedResultsPath) {
        new NeptuneExportRunner(args).run();
        File actualResultsDir = outputDir.listFiles()[0];
        assertEquivalentResults(new File(expectedResultsPath), actualResultsDir);
    }
}
4,087
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/ExportPgIntegrationTest.java
package com.amazonaws.services.neptune;

import com.amazonaws.services.neptune.export.NeptuneExportRunner;
import org.junit.Test;

import java.io.File;

/**
 * Integration tests for the {@code export-pg} command covering CSV export,
 * label strategies, JanusGraph formatting and Gremlin filtering.
 */
public class ExportPgIntegrationTest extends AbstractExportIntegrationTest {

    @Test
    public void testExportPgToCsv() {
        runAndAssert(
                new String[]{"export-pg", "-e", neptuneEndpoint, "-d", outputDir.getPath()},
                "src/test/resources/IntegrationTest/testExportPgToCsv");
    }

    @Test
    public void testExportPgWithEdgeAndVertexLabels() {
        runAndAssert(
                new String[]{"export-pg", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                        "--edge-label-strategy", "edgeAndVertexLabels"},
                "src/test/resources/IntegrationTest/testExportPgWithEdgeAndVertexLabels");
    }

    @Test
    public void testExportPgToCsvWithJanus() {
        // Janus formatting is expected to produce output equivalent to the plain CSV export.
        runAndAssert(
                new String[]{"export-pg", "-e", neptuneEndpoint, "-d", outputDir.getPath(), "--janus"},
                "src/test/resources/IntegrationTest/testExportPgToCsv");
    }

    @Test
    public void testExportPgToCsvWithGremlinFilter() {
        runAndAssert(
                new String[]{"export-pg", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                        "--gremlin-filter", "has(\"runways\", 2)"},
                "src/test/resources/IntegrationTest/testExportPgToCsvWithGremlinFilter");
    }

    @Test
    public void testExportEdgesToCsvWithGremlinFilter() {
        runAndAssert(
                new String[]{"export-pg", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                        "--gremlin-filter", "hasLabel(\"route\")"},
                "src/test/resources/IntegrationTest/testExportEdgesToCsvWithGremlinFilter");
    }

    @Test
    public void testExportEdgesToCsvWithGremlinFilterWithEarlyGremlinFilter() {
        // Early edge filtering should produce the same export as the plain edge filter.
        runAndAssert(
                new String[]{"export-pg", "-e", neptuneEndpoint, "-d", outputDir.getPath(),
                        "--gremlin-filter", "hasLabel(\"route\")", "--filter-edges-early"},
                "src/test/resources/IntegrationTest/testExportEdgesToCsvWithGremlinFilter");
    }

    // Runs the exporter and compares the single produced output directory
    // against the expected results directory.
    private void runAndAssert(String[] args, String expectedResultsPath) {
        new NeptuneExportRunner(args).run();
        File actualResultsDir = outputDir.listFiles()[0];
        assertEquivalentResults(new File(expectedResultsPath), actualResultsDir);
    }
}
4,088
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/ExportRdfIntegrationTest.java
package com.amazonaws.services.neptune;

import com.amazonaws.services.neptune.export.NeptuneExportRunner;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.rio.RDFFormat;
import org.eclipse.rdf4j.rio.RDFParser;
import org.eclipse.rdf4j.rio.Rio;
import org.eclipse.rdf4j.rio.helpers.StatementCollector;
import org.junit.Test;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.List;

/**
 * Integration test for the {@code export-rdf} command. Compares exported Turtle
 * statements against a fixture, ignoring statement order.
 */
public class ExportRdfIntegrationTest extends AbstractExportIntegrationTest {

    @Test
    public void testExportRdf() {
        final String[] command = {"export-rdf", "-e", neptuneEndpoint, "-d", outputDir.getPath()};

        final NeptuneExportRunner runner = new NeptuneExportRunner(command);
        runner.run();

        final File resultDir = outputDir.listFiles()[0];
        assertTrue("Returned statements don't match expected",
                areStatementsEqual("src/test/resources/IntegrationTest/testExportRdf/statements/statements.ttl",
                        resultDir + "/statements/statements.ttl"));
    }

    // Order-insensitive set equality of the statements in the two Turtle files.
    private boolean areStatementsEqual(final String expected, final String actual) {
        final List<Statement> expectedStatements = parseStatements(expected);
        final List<Statement> actualStatements = parseStatements(actual);
        return expectedStatements.containsAll(actualStatements)
                && actualStatements.containsAll(expectedStatements);
    }

    // Parses a Turtle file into a list of statements. Parse/IO failures are
    // rethrown so a missing or malformed file fails the test instead of silently
    // producing two empty (and therefore "equal") statement lists.
    private List<Statement> parseStatements(final String path) {
        final List<Statement> statements = new ArrayList<>();
        final RDFParser rdfParser = Rio.createParser(RDFFormat.TURTLE);
        rdfParser.setRDFHandler(new StatementCollector(statements));
        try {
            rdfParser.parse(new FileInputStream(path));
        } catch (Exception e) {
            throw new RuntimeException("Failed to parse RDF file: " + path, e);
        }
        return statements;
    }
}
4,089
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/cluster/InstanceTypeTest.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
    http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.cluster;

import org.junit.Test;

import static org.junit.Assert.*;

/** Tests for {@link InstanceType} name parsing and concurrency mapping. */
public class InstanceTypeTest {

    @Test
    public void shouldAllowNameWithOutWithDBPrefix() {
        // Both "db."-prefixed and bare instance type names must parse to the same enum value.
        assertEquals(InstanceType.db_r5_8xlarge, InstanceType.parse("db.r5.8xlarge"));
        assertEquals(InstanceType.db_r5_8xlarge, InstanceType.parse("r5.8xlarge"));
        assertEquals(InstanceType.db_r5d_12xlarge, InstanceType.parse("db.r5d.12xlarge"));
        assertEquals(InstanceType.db_r5d_12xlarge, InstanceType.parse("r5d.12xlarge"));
    }

    @Test
    public void shouldRecognizeR6gInstanceTypePrefix() {
        // Fixed argument order: JUnit's assertEquals takes (expected, actual).
        assertEquals(128, InstanceType.parse("db.r6g.16xlarge").concurrency());
    }
}
4,090
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/cluster/GetLastEventIdTest.java
package com.amazonaws.services.neptune.cluster;

import org.junit.Assert;
import org.junit.Test;

import static org.junit.Assert.*;

/**
 * Tests that {@link GetLastEventId#MaxCommitNumValueForEngine} selects the
 * correct maximum commit number for each Neptune engine version:
 * Integer.MAX_VALUE for 1.0.4.1 and below, Long.MAX_VALUE from 1.0.4.2 onwards.
 */
public class GetLastEventIdTest {

    // Engine versions up to and including 1.0.4.1 (Integer.MAX_VALUE era).
    private static final String[] VERSIONS_1041_AND_BELOW = {
            "1.0.1.0", "1.0.1.1", "1.0.1.2", "1.0.2.0", "1.0.2.1", "1.0.2.2",
            "1.0.3.0", "1.0.4.0", "1.0.4.1"
    };

    // Engine versions from 1.0.4.2 onwards (Long.MAX_VALUE era).
    private static final String[] VERSIONS_1042_AND_ABOVE = {
            "1.0.4.2", "1.0.5.0", "1.0.5.1", "1.1.0.0", "1.1.1.0", "1.2.0.0", "1.2.0.1"
    };

    @Test
    public void shouldReturnIntegerMaxValueForEngineVersions1041AndBelow() {
        String expectedValue = String.valueOf(Integer.MAX_VALUE);
        for (String version : VERSIONS_1041_AND_BELOW) {
            Assert.assertEquals(expectedValue, GetLastEventId.MaxCommitNumValueForEngine(version));
        }
        for (String version : VERSIONS_1042_AND_ABOVE) {
            Assert.assertNotEquals(expectedValue, GetLastEventId.MaxCommitNumValueForEngine(version));
        }
    }

    // Renamed from shouldReturnLongMaxValueForEngineVersions1041AndBelow: this test
    // asserts Long.MAX_VALUE for versions 1.0.4.2 and ABOVE, so the old name was wrong.
    @Test
    public void shouldReturnLongMaxValueForEngineVersions1042AndAbove() {
        String expectedValue = String.valueOf(Long.MAX_VALUE);
        for (String version : VERSIONS_1041_AND_BELOW) {
            Assert.assertNotEquals(expectedValue, GetLastEventId.MaxCommitNumValueForEngine(version));
        }
        for (String version : VERSIONS_1042_AND_ABOVE) {
            Assert.assertEquals(expectedValue, GetLastEventId.MaxCommitNumValueForEngine(version));
        }
    }
}
4,091
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/util/MapUtils.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
    http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.util;

import java.util.HashMap;
import java.util.Map;

/**
 * Test helper for building maps inline, e.g.
 * {@code map(entry("a", 1), entry("b", 2))}.
 */
public class MapUtils {

    // Utility class: not meant to be instantiated.
    private MapUtils() {
    }

    /**
     * Builds a mutable map from the given entries. Later entries with a
     * duplicate key overwrite earlier ones (standard HashMap.put semantics).
     *
     * @param entries key/value pairs created via {@link #entry(String, Object)}
     * @return a new map containing all entries
     */
    public static Map<?, ?> map(Entry... entries) {
        HashMap<Object, Object> map = new HashMap<>();
        for (Entry entry : entries) {
            map.put(entry.key(), entry.value());
        }
        return map;
    }

    /**
     * Creates a single key/value entry for use with {@link #map(Entry...)}.
     */
    public static Entry entry(String key, Object value) {
        return new Entry(key, value);
    }

    /** Immutable key/value pair. */
    public static class Entry {
        private final String key;
        private final Object value;

        private Entry(String key, Object value) {
            this.key = key;
            this.value = value;
        }

        public String key() {
            return key;
        }

        public Object value() {
            return value;
        }
    }
}
4,092
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/util/AWSCredentialsUtilTest.java
package com.amazonaws.services.neptune.util;

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
import org.junit.Before;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;

import java.io.File;
import java.io.IOException;

import static com.amazonaws.services.neptune.util.AWSCredentialsUtil.getProfileCredentialsProvider;
import static com.amazonaws.services.neptune.util.AWSCredentialsUtil.getSTSAssumeRoleCredentialsProvider;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;

/**
 * Tests for {@code AWSCredentialsUtil} covering profile-file resolution and
 * STS assume-role credential sourcing.
 */
public class AWSCredentialsUtilTest {

    TemporaryFolder tempFolder;
    File credentialsFile;

    @Before
    public void setup() throws IOException {
        tempFolder = new TemporaryFolder();
        tempFolder.create();
        credentialsFile = tempFolder.newFile("credentialsFile");
    }

    @Test
    public void shouldGetDefaultCredsIfConfigIsNull() {
        AWSCredentialsProvider provider = getProfileCredentialsProvider(null, null);
        assertTrue(provider instanceof DefaultAWSCredentialsProviderChain);
    }

    @Test
    public void shouldAttemptToUseProvidedPath() {
        String missingPath = tempFolder.getRoot().getAbsolutePath() + "/non-existent-file";
        Throwable thrown = assertThrows(IllegalArgumentException.class,
                () -> getProfileCredentialsProvider(null, missingPath).getCredentials());
        assertEquals("AWS credential profiles file not found in the given path: " + missingPath,
                thrown.getMessage());
    }

    @Test
    public void shouldUseDefaultCredsIfProfileNameNull() {
        Throwable thrown = assertThrows(IllegalArgumentException.class,
                () -> getProfileCredentialsProvider(null, credentialsFile.getAbsolutePath()).getCredentials());
        assertTrue(thrown.getMessage().contains("No AWS profile named 'default'"));
    }

    @Test
    public void shouldAttemptToUseProvidedProfileName() {
        Throwable thrown = assertThrows(IllegalArgumentException.class,
                () -> getProfileCredentialsProvider("test", credentialsFile.getAbsolutePath()).getCredentials());
        assertTrue(thrown.getMessage().contains("No AWS profile named 'test'"));
    }

    @Test
    public void shouldUseSourceCredsProviderWhenAssumingRole() {
        AWSCredentialsProvider mockSourceCredsProvider = mock(AWSCredentialsProvider.class);
        try {
            getSTSAssumeRoleCredentialsProvider("fakeARN", "sessionName", null, mockSourceCredsProvider, "us-west-2")
                    .getCredentials();
        } catch (AWSSecurityTokenServiceException ignored) {
            // Expected to fail as sourceCredsProvider does not have permission to assume role
        }
        Mockito.verify(mockSourceCredsProvider, Mockito.atLeast(1)).getCredentials();
    }
}
4,093
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/util/TransferManagerWrapperTest.java
package com.amazonaws.services.neptune.util; import com.amazonaws.SdkClientException; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AnonymousAWSCredentials; import org.junit.Test; import org.mockito.internal.verification.AtLeast; import static org.junit.Assert.assertNotNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class TransferManagerWrapperTest { private final String REGION = "us-west-2"; @Test public void shouldHandleNullCredentialsProvider() { TransferManagerWrapper wrapper = new TransferManagerWrapper(REGION, null); assertNotNull(wrapper); assertNotNull(wrapper.get()); assertNotNull(wrapper.get().getAmazonS3Client()); } @Test public void shouldUseProvidedCredentials() { AWSCredentialsProvider mockCredentialsProvider = mock(AWSCredentialsProvider.class); when(mockCredentialsProvider.getCredentials()).thenReturn(new AnonymousAWSCredentials()); TransferManagerWrapper wrapper = new TransferManagerWrapper(REGION, mockCredentialsProvider); assertNotNull(wrapper); assertNotNull(wrapper.get()); assertNotNull(wrapper.get().getAmazonS3Client()); //Expected to fail due to invalid credentials. This call is here to force the S3 client to call getCredentials() try { wrapper.get().getAmazonS3Client().listBuckets(); } catch (SdkClientException e) {} verify(mockCredentialsProvider, new AtLeast(1)).getCredentials(); } }
4,094
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/util/SemicolonUtilsTest.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
    http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.util;

import org.junit.Test;

import java.util.Collection;
import java.util.Iterator;

import static org.junit.Assert.*;

/** Tests for {@code SemicolonUtils} splitting and unescaping behaviour. */
public class SemicolonUtilsTest {

    @Test
    public void shouldSplitStringOnSemicolons() {
        assertElements(SemicolonUtils.split("abc;def;ghi"), "abc", "def", "ghi");
    }

    @Test
    public void shouldNotSplitOnEscapedSemicolon() {
        // The escaped semicolon in "d\;ef" must survive the split intact.
        assertElements(SemicolonUtils.split("abc;d\\;ef;ghi"), "abc", "d\\;ef", "ghi");
    }

    @Test
    public void shouldUnescapeEscapedSemicolonIfThereAreNoUnescapedSemicolonsInString() {
        assertEquals("d;ef", SemicolonUtils.unescape("d\\;ef"));
    }

    @Test
    public void shouldReturnEmptyCollectionForEmptyString() {
        Collection<String> collection = SemicolonUtils.split("");
        assertEquals(0, collection.size());
    }

    // Asserts the collection has exactly the expected elements, in order.
    private static void assertElements(Collection<String> actual, String... expected) {
        assertEquals(expected.length, actual.size());
        Iterator<String> iterator = actual.iterator();
        for (String element : expected) {
            assertEquals(element, iterator.next());
        }
    }
}
4,095
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/util/S3ObjectInfoTest.java
/* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.amazonaws.services.neptune.util; import com.amazonaws.services.s3.Headers; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.SSEAlgorithm; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; public class S3ObjectInfoTest { @Test public void canParseBucketFromURI(){ String s3Uri = "s3://my-bucket/a/b/c"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("my-bucket", s3ObjectInfo.bucket()); } @Test public void canParseKeyWithoutTrailingSlashFromURI(){ String s3Uri = "s3://my-bucket/a/b/c"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/c", s3ObjectInfo.key()); } @Test public void canParseKeyWithTrainlingSlashFromURI(){ String s3Uri = "s3://my-bucket/a/b/c/"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/c/", s3ObjectInfo.key()); } @Test public void canCreateDownloadFileForKeyWithoutTrailingSlash(){ String s3Uri = "s3://my-bucket/a/b/c.txt"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("/temp/c.txt", s3ObjectInfo.createDownloadFile("/temp").getAbsolutePath()); } @Test public void canCreateDownloadFileForKeyWithTrailingSlash(){ String s3Uri = "s3://my-bucket/a/b/c/"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("/temp/c", s3ObjectInfo.createDownloadFile("/temp").getAbsolutePath()); } @Test public void 
canCreateNewInfoForKeyWithoutTrailingSlash() { String s3Uri = "s3://my-bucket/a/b/c"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/c/dir", s3ObjectInfo.withNewKeySuffix("dir").key()); } @Test public void canCreateNewKeyForKeyWithTrailingSlash() { String s3Uri = "s3://my-bucket/a/b/c/"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/c/dir", s3ObjectInfo.withNewKeySuffix("dir").key()); } @Test public void canReplacePlaceholderInKey() { String s3Uri = "s3://my-bucket/a/b/_COMPLETION_ID_/manifest.json"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/123/manifest.json", s3ObjectInfo.replaceOrAppendKey("_COMPLETION_ID_", "123").key()); } @Test public void canReplaceTmpPlaceholderInKey() { String s3Uri = "s3://my-bucket/a/b/tmp/manifest.json"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/failed/manifest.json", s3ObjectInfo.replaceOrAppendKey("/tmp/", "/failed/").key()); } @Test public void canAppendSuffixIfNoPlaceholder() { String s3Uri = "s3://my-bucket/a/b/"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/123", s3ObjectInfo.replaceOrAppendKey("_COMPLETION_ID_", "123").key()); } @Test public void canAppendAltSuffixIfNoPlaceholder() { String s3Uri = "s3://my-bucket/a/b/"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("a/b/123.json", s3ObjectInfo.replaceOrAppendKey("_COMPLETION_ID_", "123", "123.json").key()); } @Test public void canHandlePathsWithBucketNameOnlyNoSlash(){ String s3Uri = "s3://my-bucket"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("", s3ObjectInfo.key()); assertEquals("s3://my-bucket/new-suffix", s3ObjectInfo.withNewKeySuffix("new-suffix").toString()); assertEquals("new-suffix", s3ObjectInfo.withNewKeySuffix("new-suffix").key()); assertEquals("/123", s3ObjectInfo.replaceOrAppendKey("_COMPLETION_ID_", "123").key()); assertEquals("/123.json", s3ObjectInfo.replaceOrAppendKey("_COMPLETION_ID_", 
"123", "123.json").key()); } @Test public void canHandlePathsWithBucketNameWithSlash(){ String s3Uri = "s3://my-bucket/"; S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(s3Uri); assertEquals("", s3ObjectInfo.key()); assertEquals("s3://my-bucket/new-suffix", s3ObjectInfo.withNewKeySuffix("new-suffix").toString()); assertEquals("new-suffix", s3ObjectInfo.withNewKeySuffix("new-suffix").key()); assertEquals("/123", s3ObjectInfo.replaceOrAppendKey("_COMPLETION_ID_", "123").key()); assertEquals("/123.json", s3ObjectInfo.replaceOrAppendKey("_COMPLETION_ID_", "123", "123.json").key()); } @Test public void canSetContentLengthAndDefaultEncryptionTypeProperlyWithEmptyKey(){ long testLength = 100; String testKeyId = ""; ObjectMetadata objectMetadata = S3ObjectInfo.createObjectMetadata(testLength, testKeyId); assertEquals(testLength, objectMetadata.getContentLength()); assertEquals(SSEAlgorithm.AES256.getAlgorithm(), objectMetadata.getSSEAlgorithm()); assertNull(objectMetadata.getSSEAwsKmsKeyId()); } @Test public void canSetContentLengthAndDefaultEncryptionTypeProperlyWithBlankKey(){ long testLength = 100; String testKeyId = " "; ObjectMetadata objectMetadata = S3ObjectInfo.createObjectMetadata(testLength, testKeyId); assertEquals(testLength, objectMetadata.getContentLength()); assertEquals(SSEAlgorithm.AES256.getAlgorithm(), objectMetadata.getSSEAlgorithm()); assertNull(objectMetadata.getSSEAwsKmsKeyId()); } @Test public void canSetContentLengthAndDefaultEncryptionTypeProperlyWithNullKey(){ long testLength = 100; ObjectMetadata objectMetadata = S3ObjectInfo.createObjectMetadata(testLength, null); assertEquals(testLength, objectMetadata.getContentLength()); assertEquals(SSEAlgorithm.AES256.getAlgorithm(), objectMetadata.getSSEAlgorithm()); assertNull(objectMetadata.getSSEAwsKmsKeyId()); } @Test public void canSetContentLengthAndKmsEncryptionTypeProperlyWithCmkKey(){ long testLength = 100; String testKeyId = "abcdefgh-hijk-0123-4567-0123456789ab"; ObjectMetadata objectMetadata = 
S3ObjectInfo.createObjectMetadata(testLength, testKeyId); assertEquals(testLength, objectMetadata.getContentLength()); assertEquals(SSEAlgorithm.KMS.getAlgorithm(), objectMetadata.getSSEAlgorithm()); assertEquals(testKeyId, objectMetadata.getSSEAwsKmsKeyId()); } }
4,096
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/io/RecordSplitterTest.java
package com.amazonaws.services.neptune.io;

import com.amazonaws.services.neptune.profiles.neptune_ml.JsonFromResource;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import org.junit.Test;

import java.io.IOException;
import java.util.*;

import static org.junit.Assert.assertEquals;

/**
 * Tests for {@link RecordSplitter}: splitting plain strings by length (optionally
 * on word boundaries) and shredding Neptune Streams property-graph, CSV, and RDF
 * records loaded from JSON fixture files.
 */
public class RecordSplitterTest {

    private final LargeStreamRecordHandlingStrategy STRATEGY = LargeStreamRecordHandlingStrategy.splitAndShred;

    @Test
    public void shouldSplitStringByLength() {
        String s = "abcdefghijklmno";

        assertStringCollections(Collections.singletonList("abcdefghijklmno"), RecordSplitter.splitByLength(s, 15));
        assertStringCollections(Collections.singletonList("abcdefghijklmno"), RecordSplitter.splitByLength(s, 20));
        assertStringCollections(Arrays.asList("abcde", "fghij", "klmno"), RecordSplitter.splitByLength(s, 5));
        assertStringCollections(Arrays.asList("abcdef", "ghijkl", "mno"), RecordSplitter.splitByLength(s, 6));
    }

    @Test
    public void shouldSplitStringAttemptToSplitOnWordBoundary() {
        String s = " abc defghij klmno ";

        assertStringCollections(Collections.singletonList("abc defghij klmno"), RecordSplitter.splitByLength(s, 19, 4));
        assertStringCollections(Collections.singletonList("abc defghij klmno"), RecordSplitter.splitByLength(s, 24, 4));
        assertStringCollections(Arrays.asList("abc", "defgh", "ij", "klmno"), RecordSplitter.splitByLength(s, 5));
        assertStringCollections(Arrays.asList("abc", "defghi", "j", "klmno"), RecordSplitter.splitByLength(s, 6, 4));
    }

    @Test
    public void shouldSplitIntoIndividualNeptuneStreamsPGRecords() throws IOException {
        assertFixture("t1.json", 160);
    }

    @Test
    public void shouldSubdivideLongPGRecords() throws IOException {
        assertFixture("t2.json", 160);
    }

    @Test
    public void shouldSplitCsvIntoIndividualFields() throws IOException {
        assertFixture("t3.json", 160);
    }

    @Test
    public void shouldSubdivideLongCsvFields() throws IOException {
        assertFixture("t4.json", 8);
    }

    @Test
    public void shouldSplitIntoIndividualNeptuneStreamsRDFRecords() throws IOException {
        assertFixture("t5.json", 160);
    }

    @Test
    public void shouldSubdivideLongRDFRecords() throws IOException {
        assertFixture("t6.json", 140);
    }

    /**
     * Loads the named JSON fixture, splits its {@code input} value with the given
     * maximum record length, and checks the results against the fixture's
     * {@code output} array.
     */
    private void assertFixture(String filename, int maxLength) throws IOException {
        TestFixture testFixture = new TestFixture(filename, getClass());
        RecordSplitter recordSplitter = new RecordSplitter(maxLength, STRATEGY);
        Collection<String> records = recordSplitter.split(testFixture.input());
        assertStringCollections(testFixture.expectedOutputs(), records);
    }

    /**
     * Element-by-element comparison with a size check first, so a length mismatch
     * reports both full collections instead of failing mid-iteration.
     */
    private void assertStringCollections(Collection<String> expectedOutputs, Collection<String> records) {
        String msg = String.format("Expected: %s\nActual: %s", expectedOutputs, records);
        assertEquals(msg, expectedOutputs.size(), records.size());
        Iterator<String> expectedIterator = expectedOutputs.iterator();
        Iterator<String> recordsIterator = records.iterator();
        while (expectedIterator.hasNext()) {
            String expected = expectedIterator.next();
            String actual = recordsIterator.next();
            assertEquals(expected, actual);
        }
    }

    /**
     * Reads a JSON test fixture resource: {@code input} is the record to split,
     * {@code output} the array of expected split results.
     */
    private static class TestFixture {

        private final String input;
        private final Collection<String> expectedOutputs = new ArrayList<>();

        public TestFixture(String filename, Class<?> clazz) throws IOException {
            JsonNode json = JsonFromResource.get(filename, clazz);
            this.input = json.get("input").toString();
            ArrayNode output = (ArrayNode) json.get("output");
            for (JsonNode jsonNode : output) {
                expectedOutputs.add(jsonNode.toString());
            }
        }

        public String input() {
            return input;
        }

        public Collection<String> expectedOutputs() {
            return expectedOutputs;
        }
    }
}
4,097
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/io/DirectoriesTest.java
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
    http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/

package com.amazonaws.services.neptune.io;

import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphExportFormat;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.atomic.AtomicInteger;

import static org.junit.Assert.assertEquals;

/**
 * Tests for {@link Directories}: sanitizing forbidden characters in export
 * filenames, and substituting a digest-based name when a filename would be too
 * long for the filesystem.
 */
public class DirectoriesTest {

    @Test
    public void replacesForbiddenCharactersInFilename() throws UnsupportedEncodingException {
        String filename = "(Person;Staff;Temp\\;Holidays)-works_for-(Admin;Perm;Person)";

        String updated = Directories.fileName(filename, new AtomicInteger());

        // Forbidden characters are percent-encoded and an index suffix ("-1") is appended.
        assertEquals("%28Person%3BStaff%3BTemp%5C%3BHolidays%29-works_for-%28Admin%3BPerm%3BPerson%29-1", updated);
    }

    @Test
    public void createsDigestFilePathsForVeryLongFilenames() throws IOException {
        Path path = Paths.get("/export");
        String longName = "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890";
        Directories directories = Directories.createFor(DirectoryStructure.PropertyGraph, new File("home"), "export-id", "", "");

        Path filePath = directories.createFilePath(path, longName, PropertyGraphExportFormat.csv);

        // The over-long name is replaced with a fixed-length digest filename.
        assertEquals("/export/8044f12c352773b7ff400ef524da6e90db419e4a.csv", filePath.toString());
    }
}
4,098
0
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune
Create_ds/neptune-export/src/test/java/com/amazonaws/services/neptune/io/KinesisConfigTest.java
package com.amazonaws.services.neptune.io; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.neptune.cli.AbstractTargetModule; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThrows; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class KinesisConfigTest { private AbstractTargetModule target; @Before public void resetTargetModule() { target = spy(AbstractTargetModule.class); } @Test public void shouldCreateStreamIfNameAndRegionAreProvided() { when(target.getStreamName()).thenReturn("test"); when(target.getRegion()).thenReturn("us-west-2"); KinesisConfig config = new KinesisConfig(target); assertNotNull(config.stream()); } @Test public void shouldNotCreateStreamIfNameNotProvided() { when(target.getStreamName()).thenReturn(""); when(target.getRegion()).thenReturn("us-west-2"); KinesisConfig config = new KinesisConfig(target); Throwable t = assertThrows(IllegalArgumentException.class, () -> config.stream()); assertEquals("You must supply an AWS Region and Amazon Kinesis Data Stream name", t.getMessage()); } @Test public void shouldNotCreateStreamIfRegionNotProvided() { when(target.getStreamName()).thenReturn("test"); when(target.getRegion()).thenReturn(""); KinesisConfig config = new KinesisConfig(target); Throwable t = assertThrows(IllegalArgumentException.class, () -> config.stream()); assertEquals("You must supply an AWS Region and Amazon Kinesis Data Stream name", t.getMessage()); } @Test public void shouldUseProvidedCredentialsProvider() { when(target.getStreamName()).thenReturn("test"); when(target.getRegion()).thenReturn("us-west-2"); AWSCredentialsProvider mockedCredsProvider = mock(AWSCredentialsProvider.class); 
when(target.getCredentialsProvider()).thenReturn(mockedCredsProvider); KinesisConfig config = new KinesisConfig(target); config.stream().publish("test"); verify(mockedCredsProvider, Mockito.atLeast(1)).getCredentials(); } }
4,099