index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/TokenPoolTopology.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolImpl;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.logging.Logger;
/**
 * Topology of a dynomite cluster as seen by the connection pool: tracks, per
 * rack, the tokens on the ring together with the {@link HostConnectionPool}
 * and {@link Host} that own them.
 * <p>
 * Thread-safety: rack buckets are created atomically via
 * {@link ConcurrentHashMap#computeIfAbsent}, but the per-rack collections
 * themselves are unsynchronized (matching the original behavior); concurrent
 * writers to the SAME rack must be externally coordinated.
 */
public class TokenPoolTopology {

    // lowercase name; the original field "Logger" shadowed the (unused) java.util.logging.Logger import
    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(TokenPoolTopology.class);

    // rack name -> tokens (and their owning connection pools) on that rack
    private final ConcurrentHashMap<String, List<TokenStatus>> map = new ConcurrentHashMap<String, List<TokenStatus>>();
    // rack name -> (token -> host owning that token)
    private final ConcurrentHashMap<String, Map<Long, Host>> rackTokenHostMap = new ConcurrentHashMap<String, Map<Long, Host>>();

    private final int replicationFactor;

    public TokenPoolTopology(int replicationFactor) {
        this.replicationFactor = replicationFactor;
    }

    /**
     * Registers a token and its owning connection pool under the given rack.
     * computeIfAbsent fixes the check-then-act race in the original get/put
     * sequence, where two threads adding the first token for a rack could
     * each create a list and one list would be silently lost.
     */
    public void addToken(String rack, Long token, HostConnectionPool<?> hostPool) {
        map.computeIfAbsent(rack, r -> new ArrayList<TokenStatus>())
                .add(new TokenStatus(token, hostPool));
    }

    /**
     * Registers the host that owns the given token on the given rack.
     */
    public void addHostToken(String rack, Long token, Host host) {
        logger.info("Adding Host to Topology {}", host);
        rackTokenHostMap.computeIfAbsent(rack, r -> new HashMap<Long, Host>()).put(token, host);
    }

    /**
     * Removes the token-to-host entry for the given rack. The host argument
     * is used only for logging. No-op when the rack is unknown.
     */
    public void removeHost(String rack, Long token, Host host) {
        logger.info("Removing Host from Topology {}", host);
        Map<Long, Host> tokenHostMap = rackTokenHostMap.get(rack);
        if (tokenHostMap == null) {
            return;
        }
        // The original did put(token, null), which left a stale key behind so
        // containsKey() kept reporting removed tokens; remove the entry instead.
        tokenHostMap.remove(token);
    }

    public ConcurrentHashMap<String, List<TokenStatus>> getAllTokens() {
        return map;
    }

    public int getReplicationFactor() {
        return replicationFactor;
    }

    /**
     * @return a randomly selected rack name, or null when no racks are known
     * (the original threw IndexOutOfBoundsException in that case)
     */
    public String getRandomRack() {
        List<String> racks = new ArrayList<String>(rackTokenHostMap.keySet());
        if (racks.isEmpty()) {
            return null;
        }
        Collections.shuffle(racks);
        return racks.get(0);
    }

    /**
     * @return the token statuses registered for the given rack, or null if the
     * rack is null or unknown
     */
    public List<TokenStatus> getTokensForRack(String rack) {
        return rack != null ? map.get(rack) : null;
    }

    /**
     * @return the token-to-host map for the given rack, or null if the rack is
     * null or unknown
     */
    public Map<Long, Host> getTokenHostsForRack(String rack) {
        return rack != null ? rackTokenHostMap.get(rack) : null;
    }

    @Override
    public String toString() {
        List<String> keyList = new ArrayList<String>(map.keySet());
        Collections.sort(keyList);

        StringBuilder sb = new StringBuilder();
        sb.append("TokenPoolTopology\n");

        for (String key : keyList) {
            sb.append("\nRack: ").append(key).append("\n");
            List<TokenStatus> list = map.get(key);
            Collections.sort(list);
            for (TokenStatus token : list) {
                sb.append(token.toString()).append("\n");
            }
        }
        return sb.toString();
    }

    /**
     * Immutable pairing of a ring token and the connection pool that owns it.
     * Orders by token value.
     */
    public static class TokenStatus implements Comparable<TokenStatus> {

        private final Long token;
        private final HostConnectionPool<?> hostPool;

        private TokenStatus(Long t, HostConnectionPool<?> pool) {
            token = t;
            hostPool = pool;
        }

        public Long getToken() {
            return token;
        }

        public HostConnectionPool<?> getHostPool() {
            return hostPool;
        }

        @Override
        public int compareTo(TokenStatus o) {
            return this.token.compareTo(o.token);
        }

        @Override
        public String toString() {
            return token + " ==> " + hostPool.toString();
        }
    }
}
| 6,000 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/MultiKeyCompressionOperation.java | package com.netflix.dyno.connectionpool;
/**
* Interface to be used for multi key operations, i.e.
* taking as input an vararg like String...
*
* @param <CL>
* @param <R>
* @author ipapapa
*/
public interface MultiKeyCompressionOperation<CL, R> extends Operation<CL, R> {

    /**
     * Compresses each of the supplied values for a multi-key operation.
     *
     * @param ctx connection context of the current operation
     * @param value one value per key (vararg)
     * @return the compressed form of each input value — presumably in the same
     *         order as the inputs; confirm against implementations
     */
    String[] compressMultiKeyValue(ConnectionContext ctx, String... value);

    /**
     * Decompresses a single value previously produced by
     * {@link #compressMultiKeyValue}.
     *
     * @param ctx connection context of the current operation
     * @param value compressed value
     * @return the decompressed value
     */
    String decompressValue(ConnectionContext ctx, String value);
}
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/TokenRackMapper.java | package com.netflix.dyno.connectionpool;
import java.util.Map;
/**
 * Maps dynomite ring tokens to the rack that owns them.
 */
public interface TokenRackMapper {

    /**
     * @return the full token-to-rack mapping held by this mapper
     */
    Map<Long, String> getTokenRackMap();

    /**
     * Looks up the rack for a token.
     * (Parameter renamed from "Token" to "token" to follow Java naming
     * conventions; interface parameter names do not affect callers.)
     *
     * @param token token to look up
     * @return the rack owning the given token, or null if unknown
     */
    String getRackForToken(Long token);

    /**
     * Records that the given token is owned by the given rack.
     *
     * @param token token to map
     * @param rack owning rack
     */
    void setRackForToken(Long token, String rack);
}
| 6,002 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/ConnectionPool.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Future;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
* Base interface for a pool of connections. A concrete connection pool will
* track hosts in a cluster.
*
* @param <CL>
* @author poberai
*/
public interface ConnectionPool<CL> {

    /**
     * Add a host to the connection pool.
     *
     * @param host host to add
     * @throws DynoException
     * @return True if host was added or false if host already exists
     */
    boolean addHost(Host host);

    /**
     * Remove a host from the connection pool.
     * (Original javadoc was copy-pasted from addHost and described the wrong
     * return semantics.)
     *
     * @param host host to remove
     * @throws DynoException
     * @return True if host was removed or false if host was not present
     */
    boolean removeHost(Host host);

    /**
     * @param host
     * @return Return true if the host is up
     */
    boolean isHostUp(Host host);

    /**
     * @param host
     * @return Return true if host is contained within the connection pool
     */
    boolean hasHost(Host host);

    /**
     * @return Return list of active hosts on which connections can be created
     */
    List<HostConnectionPool<CL>> getActivePools();

    /**
     * @return Get all pools
     */
    List<HostConnectionPool<CL>> getPools();

    /**
     * Set the complete set of hosts in the ring.
     *
     * @param activeHosts hosts currently active
     * @param inactiveHosts hosts currently inactive
     * @return future completing once the pool has applied the host update
     */
    Future<Boolean> updateHosts(Collection<Host> activeHosts, Collection<Host> inactiveHosts);

    /**
     * @param host
     * @return Return an immutable connection pool for this host
     */
    HostConnectionPool<CL> getHostPool(Host host);

    /**
     * Execute an operation with failover within the context of the connection
     * pool. The operation will only fail over for connection pool errors and
     * not application errors.
     *
     * @param <R>
     * @param op
     * @return the result of the operation
     * @throws DynoException
     */
    <R> OperationResult<R> executeWithFailover(Operation<CL, R> op) throws DynoException;

    /**
     * Scatter gather style operation across the ring.
     *
     * @param tokenRackMapper mapper used to resolve racks for tokens during the fan-out
     * @param op
     * @return Collection<OperationResult<R>> one result per ring segment contacted
     * @throws DynoException
     */
    <R> Collection<OperationResult<R>> executeWithRing(TokenRackMapper tokenRackMapper, Operation<CL, R> op) throws DynoException;

    /**
     * Execute an operation asynchronously.
     *
     * @param op
     * @return ListenableFuture<OperationResult<R>>
     * @throws DynoException
     */
    <R> ListenableFuture<OperationResult<R>> executeAsync(AsyncOperation<CL, R> op) throws DynoException;

    /**
     * Shut down the connection pool and terminate all existing connections.
     */
    void shutdown();

    /**
     * Setup the connection pool and start any maintenance threads. This includes priming connections
     * to server hosts.
     */
    Future<Boolean> start() throws DynoException;

    /**
     * Construct the connection pool but do not start any threads. The pool will poll the {@link HostSupplier}
     * once per minute and will start upon finding active hosts.
     * <p>
     * Note that an {@link IllegalStateException} will be thrown if the connection pool has been successfully
     * started.
     * </p>
     * This api is utilized by DynoJedisClient and DynoRedissonClient when starting the connection pool unless
     * {@link ConnectionPoolConfiguration#getFailOnStartupIfNoHosts()} is set to true.
     */
    void idle();

    /**
     * Retrieve the runtime configuration of the connection pool instance.
     *
     * @return ConnectionPoolConfiguration
     */
    ConnectionPoolConfiguration getConfiguration();

    /**
     * Retrieve an instance of {@link HealthTracker}.
     */
    HealthTracker<CL> getHealthTracker();

    /**
     * @return presumably true when the pool was set up via {@link #idle()} and
     *         has not yet started — confirm against the implementation
     */
    boolean isIdle();
}
| 6,003 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/TokenMapSupplier.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.List;
import java.util.Set;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
/**
* Interface for supplying the list of {@link HostToken} that represent the
* dynomite server topology
*
* @author poberai
*/
public interface TokenMapSupplier {

    /**
     * Returns the tokens for the supplied set of active hosts.
     * (Redundant "public" and meaningless "final" parameter modifiers removed:
     * interface members are implicitly public and parameter finality is not
     * part of the contract.)
     *
     * @param activeHosts hosts currently considered active
     * @return List<HostToken> the tokens covering the supplied hosts
     */
    List<HostToken> getTokens(Set<Host> activeHosts);

    /**
     * Returns the token owned by the given host.
     *
     * @param host host to look up
     * @param activeHosts hosts currently considered active
     * @return the {@link HostToken} for the given host
     */
    HostToken getTokenForHost(Host host, Set<Host> activeHosts);
}
| 6,004 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/HostConnectionPool.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
* Interface for a pool of {@link Connection}(s) for a single {@link Host}
* <p>
* The interface prescribes certain key features required by clients of this class, such as
* <ol>
* <li> Basic connection pool life cycle management such as prime connections (init) and shutdown </li> <br/>
*
* <li> Basic {@link Connection} life cycle management such as borrow / return / close / markAsDown </li> <br/>
*
* <li> Tracking the {@link Host} associated with the connection pool. </li> <br/>
*
* <li> Visibility into the status of the connection pool and it's connections.
* <ol>
* <li> Tracking status of pool - isConnecting / isActive / isShutdown </li>
* <li> Tracking basic counters for connections - active / pending / blocked / idle / busy / closed etc </li>
* <li> Tracking latency scores for connections to this host. </li>
* <li> Tracking failures for connections to this host. </li>
* </ol>
* </ol>
* <p>
* This class is intended to be used within a collection of {@link HostConnectionPool} tracked by a
* {@link ConnectionPool} for all the {@link Host}(s) within a Dynomite cluster.
*
* @param <CL>
* @author poberai
* @see {@link ConnectionPool} for references to this class.
*/
public interface HostConnectionPool<CL> {

    /**
     * Borrow a connection from the host. May create a new connection if one is
     * not available.
     *
     * @param duration The amount of time to wait for a connection to become available
     * @param unit Specifies the unit of time corresponding to the duration (i.e. seconds vs milliseconds)
     * @return A borrowed connection. Connection must be returned either by calling returnConnection
     * or closeConnection.
     * @throws DynoException
     */
    Connection<CL> borrowConnection(int duration, TimeUnit unit) throws DynoException;

    /**
     * Return a connection to the host's pool. May close the connection if the
     * pool is down or the last exception on the connection is determined to be
     * fatal.
     *
     * @param connection
     * @return True if connection was closed
     */
    boolean returnConnection(Connection<CL> connection);

    /**
     * Close this connection and update internal state.
     *
     * @param connection
     * @return True if the connection was closed
     */
    boolean closeConnection(Connection<CL> connection);

    /**
     * Recycle a connection by closing this and adding a new connection to the pool.
     *
     * @param connection
     */
    void recycleConnection(Connection<CL> connection);

    /**
     * Shut down the host so no more connections may be created when
     * borrowConnections is called and connections will be terminated when
     * returnConnection is called.
     *
     * @param reason the exception that caused the pool to be marked down
     */
    void markAsDown(DynoException reason);

    /**
     * Recycle all connections in the connection pool.
     * Note that the impl should either be able to handle active requests when re-cycling connections
     * or should be NOT active when re-cycling connections, so that calling clients can failover to other pools/hosts.
     */
    void reconnect();

    /**
     * Completely shut down this connection pool as part of a client shutdown.
     */
    void shutdown();

    /**
     * Create new connections and add them to the pool. Consult ConnectionPoolConfiguration.getMaxConnsPerHost()
     *
     * @throws DynoException
     * @return Actual number of connections created
     */
    int primeConnections() throws DynoException;

    /**
     * @return Get the host to which this pool is associated
     */
    Host getHost();

    /**
     * @return Return true if the pool is active.
     */
    boolean isActive();

    /**
     * @return Return true if the pool has been shut down and is no longer accepting traffic.
     */
    boolean isShutdown();

    /**
     * Get all connections for the managed pool underneath.
     * USE with EXTREME CAUTION since all vended connections must be returned to the pool
     * in order to avoid pool exhaustion.
     *
     * @return Collection<Connection<CL>>
     */
    Collection<Connection<CL>> getAllConnections();

    // Timeout for establishing a connection; units not stated here —
    // presumably milliseconds, confirm against the implementation.
    int getConnectionTimeout();

    // Socket read timeout; presumably milliseconds — confirm with the implementation.
    int getSocketTimeout();

    // Number of connections currently managed by this pool.
    int size();
}
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/Operation.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
* Interface that represents a synchronous operation on the connection
*
* @param <CL>
* @param <R>
* @author poberai
*/
public interface Operation<CL, R> extends BaseOperation<CL, R> {

    /**
     * Execute the operation on the client object and return the results.
     *
     * @param client - The client object
     * @param state - State and metadata specific to the connection
     * @return the result of executing this operation
     * @throws DynoException
     */
    R execute(CL client, ConnectionContext state) throws DynoException;
}
| 6,006 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/RetryPolicy.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
/**
* Interface for retry policies when executing an {@link Operation}.
*
* @author poberai, jcacciatore
*/
public interface RetryPolicy {

    /**
     * Operation is starting.
     */
    void begin();

    /**
     * Operation has completed successfully.
     */
    void success();

    /**
     * Operation has failed.
     *
     * @param e the failure
     */
    void failure(Exception e);

    /**
     * Ask the policy if a retry is allowed. Note that this will return false if {@link #success()} has been called.
     *
     * @return boolean
     */
    boolean allowRetry();

    /**
     * Ask the policy if a retry can use a remote zone (rack).
     *
     * @return boolean
     */
    boolean allowCrossZoneFallback();

    /**
     * Return the number of attempts since {@link #begin()} was called.
     *
     * @return int
     */
    int getAttemptCount();

    /**
     * Factory vending a RetryPolicy per operation. Nested types in an
     * interface are implicitly public static and members implicitly public,
     * so the redundant modifiers were removed.
     */
    interface RetryPolicyFactory {
        RetryPolicy getRetryPolicy();
    }
}
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/ConnectionPoolMonitor.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.Map;
/**
* Monitoring interface to receive notification of pool events. A concrete
* monitor will make event stats available to a monitoring application and may
* also log events to a log file.
*
* @author poberai
*/
public interface ConnectionPoolMonitor {

    /**
     * Succeeded in executing an operation.
     *
     * @param host
     * @param latency
     */
    public void incOperationSuccess(Host host, long latency);

    public long getOperationSuccessCount();

    /**
     * Errors trying to execute an operation.
     *
     * @param reason
     * @param host
     */
    public void incOperationFailure(Host host, Exception reason);

    public long getOperationFailureCount();

    /**
     * An operation failed but the connection pool will attempt to fail over to
     * another host/connection.
     */
    public void incFailover(Host host, Exception reason);

    public long getFailoverCount();

    /**
     * Created a connection successfully.
     */
    public void incConnectionCreated(Host host);

    public long getConnectionCreatedCount();

    /**
     * Closed a connection.
     *
     * @param reason
     */
    public void incConnectionClosed(Host host, Exception reason);

    public long getConnectionClosedCount();

    /**
     * Attempt to create a connection failed.
     *
     * @param host
     * @param reason
     */
    public void incConnectionCreateFailed(Host host, Exception reason);

    public long getConnectionCreateFailedCount();

    /**
     * Incremented for each connection borrowed.
     *
     * @param host Host from which the connection was borrowed
     * @param delay Time spent in the connection pool borrowing the connection
     */
    public void incConnectionBorrowed(Host host, long delay);

    public long getConnectionBorrowedCount();

    /**
     * Returns the mean latency of the time required to acquire a connection.
     *
     * @return long indicating the time in microseconds
     */
    public long getConnectionBorrowedLatMean();

    /**
     * Returns the median (50th percentile) latency of the time required to
     * acquire a connection. (Original javadoc said "average"; the method name
     * indicates P50.)
     *
     * @return long indicating the time in microseconds
     */
    public long getConnectionBorrowedLatP50();

    /**
     * Returns the 99th percentile latency of the time required to acquire a
     * connection. (Original javadoc said "average"; the method name indicates
     * P99.)
     *
     * @return long indicating the time in microseconds
     */
    public long getConnectionBorrowedLatP99();

    /**
     * Clears all data from the internal histogram being used to record stats. This is necessary
     * if the pool goes into a reconnect state, where all connections are torn down and re-primed.
     */
    public void resetConnectionBorrowedLatStats();

    /**
     * Incremented for each connection returned.
     *
     * @param host Host to which connection is returned
     */
    public void incConnectionReturned(Host host);

    public long getConnectionReturnedCount();

    /**
     * Incremented for each connection recycle.
     *
     * @param host Host to which connection is returned for recycle
     */
    public void incConnectionRecycled(Host host);

    public long getConnectionRecycledCount();

    /**
     * @return Count of timeouts trying to get a connection from the pool
     */
    public long getPoolExhaustedTimeoutCount();

    /**
     * @return Count of timeouts waiting for a response from the cluster
     */
    public long getOperationTimeoutCount();

    /**
     * @return Count of socket timeouts trying to execute an operation
     */
    public long getSocketTimeoutCount();

    /**
     * @return Get number of unknown errors
     */
    public long getUnknownErrorCount();

    /**
     * @return Get number of invalid requests (i.e. bad argument values)
     */
    public long getBadRequestCount();

    /**
     * @return Count of times no hosts at all were available to execute an operation.
     */
    public long getNoHostCount();

    /**
     * @return Return the number of hosts in the pool
     */
    public long getHostCount();

    /**
     * @return Return the number of times any host was marked as down.
     */
    public long getHostDownCount();

    /**
     * @return Return the number of active hosts
     */
    public long getHostUpCount();

    /**
     * A host was added and given the associated pool. The pool is immutable and
     * can be used to get info about the number of open connections.
     *
     * @param host
     * @param pool
     */
    public void hostAdded(Host host, HostConnectionPool<?> pool);

    /**
     * A host was removed from the pool. This is usually called when a downed
     * host is removed from the ring.
     *
     * @param host
     */
    public void hostRemoved(Host host);

    /**
     * A host was identified as downed.
     *
     * @param host
     * @param reason Exception that caused the host to be identified as down
     */
    public void hostDown(Host host, Exception reason);

    /**
     * A host was reactivated after being marked down.
     *
     * @param host
     * @param pool
     */
    public void hostUp(Host host, HostConnectionPool<?> pool);

    /**
     * Sets the current total number of hosts tracked by this monitor.
     *
     * @param hostCount
     */
    public void setHostCount(long hostCount);

    /**
     * @return Return a mapping of all hosts and their statistics
     */
    public Map<Host, HostConnectionStats> getHostStats();
}
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/DecoratingListenableFuture.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Adapts a plain {@link Future} to the {@link ListenableFuture} interface by
 * delegating every {@link Future} method to the wrapped instance.
 * <p>
 * Listener registration is not supported: {@link #addListener} throws
 * {@link UnsupportedOperationException}. (Previously a bare RuntimeException;
 * UnsupportedOperationException is the idiomatic type for an unimplemented
 * operation and is itself a RuntimeException, so existing catch blocks keep
 * working.)
 */
public class DecoratingListenableFuture<V> implements ListenableFuture<V> {

    private final Future<V> innerFuture;

    public DecoratingListenableFuture(Future<V> future) {
        innerFuture = future;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        return innerFuture.cancel(mayInterruptIfRunning);
    }

    @Override
    public boolean isCancelled() {
        return innerFuture.isCancelled();
    }

    @Override
    public boolean isDone() {
        return innerFuture.isDone();
    }

    @Override
    public V get() throws InterruptedException, ExecutionException {
        return innerFuture.get();
    }

    @Override
    public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        return innerFuture.get(timeout, unit);
    }

    @Override
    public void addListener(Runnable listener, Executor executor) {
        // Listener support was never implemented by this decorator.
        throw new UnsupportedOperationException("Not Implemented");
    }
}
| 6,009 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/ErrorRateMonitorConfig.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.List;
import com.netflix.dyno.connectionpool.impl.health.ErrorRateMonitor;
/**
* Interface for config required by {@link ErrorRateMonitor}
*
* @author poberai
*/
public interface ErrorRateMonitorConfig {

    /**
     * Size of the window (in seconds) that should be monitored.
     * (Redundant "public" modifiers removed throughout: interface members are
     * implicitly public.)
     *
     * @return int
     */
    int getWindowSizeSeconds();

    /**
     * Frequency at which the error rate check should run.
     *
     * @return int
     */
    int getCheckFrequencySeconds();

    /**
     * Window for suppressing alerts once the alert has been triggered.
     * This is useful for suppressing a deluge of alerts once a critical alert has been fired.
     *
     * @return int
     */
    int getCheckSuppressWindowSeconds();

    /**
     * Multiple ErrorThresholds to honor.
     *
     * @return List<ErrorThreshold>
     */
    List<ErrorThreshold> getThresholds();

    /**
     * Describes an isolated error threshold to monitor.
     */
    interface ErrorThreshold {

        /**
         * Error threshold (errors per second).
         *
         * @return int
         */
        int getThresholdPerSecond();

        /**
         * Size of window to consider when sampling the error rate.
         *
         * @return int
         */
        int getWindowSeconds();

        /**
         * How much of the window should be above the error threshold in order to generate the alert.
         * This is used when the error rates are sparse but still frequent enough to trigger an alert.
         *
         * @return int
         */
        int getWindowCoveragePercentage();
    }
}
| 6,010 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/CursorBasedResult.java | /*******************************************************************************
* Copyright 2015 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.List;
/**
* The result of performing a SCAN operation
*/
public interface CursorBasedResult<T> {

    /**
     * @return the typed results gathered so far for this SCAN iteration
     */
    List<T> getResult();

    /**
     * @return the results rendered as strings
     */
    List<String> getStringResult();

    /**
     * @param host host identifier
     * @return presumably the cursor value needed to resume the SCAN on the
     *         given host — confirm against implementations
     */
    String getCursorForHost(String host);

    /**
     * @return true when the scan has completed (no further cursor iterations
     *         are required)
     */
    boolean isComplete();
}
| 6,011 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/ConnectionPoolConfigurationPublisher.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
public interface ConnectionPoolConfigurationPublisher {

    /**
     * Supported sink types for configuration publishing.
     */
    enum PublisherType {
        ELASTIC
    }

    /**
     * Publishes configuration information to a sink. This is useful for features such as phone-home, so
     * that clients and client versions can be easily identified.
     */
    void publish();
}
| 6,012 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/TopologyView.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.List;
import java.util.Map;
/**
 * Represents a read-only view of the dynomite server topology.
 */
public interface TopologyView {

    /**
     * Retrieves a read-only view of the server topology.
     * <p>
     * NOTE(review): {@code TokenPoolTopology} maintains its token map keyed by
     * rack; confirm whether the snapshot key here is server-id or rack name.
     *
     * @return An unmodifiable map of server-id to list of token status
     */
    Map<String, List<TokenPoolTopology.TokenStatus>> getTopologySnapshot();

    /**
     * Returns the token for the given key.
     *
     * @param key The key of the record stored in dynomite
     * @return Long The token that owns the given key
     */
    Long getTokenForKey(String key);
}
| 6,013 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/HostSupplier.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.List;
/**
 * Supplies the set of {@link Host}s that make up the dynomite cluster. The
 * {@link ConnectionPool} consults a HostSupplier to learn which hosts it
 * should open {@link Connection}s to.
 *
 * @author poberai
 */
public interface HostSupplier {

    /**
     * Returns the dynomite hosts the connection pool should use.
     *
     * @return the current list of hosts
     */
    List<Host> getHosts();
}
| 6,014 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/Connection.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
 * Interface to an instance of a connection on a host.
 *
 * @param <CL> type of the underlying client held by this connection
 * @author poberai
 */
public interface Connection<CL> {

    /**
     * Execute an operation on the connection and return a result.
     *
     * @param <R> result type of the operation
     * @param op  operation to run against this connection's client
     * @return the operation result, wrapped with execution metadata
     * @throws DynoException on execution failure
     */
    public <R> OperationResult<R> execute(Operation<CL, R> op) throws DynoException;

    /**
     * Asynchronous counterpart of {@link #execute(Operation)}: returns a
     * future that resolves to the operation result.
     *
     * @throws DynoException on failure to start the operation
     */
    public <R> ListenableFuture<OperationResult<R>> executeAsync(AsyncOperation<CL, R> op) throws DynoException;

    /**
     * Shut down the connection and release its resources.
     */
    public void close();

    /**
     * @return Get the host for this connection
     */
    public Host getHost();

    /**
     * Open a new connection.
     *
     * @throws DynoException if the connection could not be established
     */
    public void open() throws DynoException;

    /**
     * Can be used by clients to indicate connection exception.
     * This can be analyzed by connection pools later
     * e.g remove host from connection pool etc.
     *
     * @return the last exception observed on this connection, if any
     */
    public DynoConnectException getLastException();

    /**
     * Return the parent HostConnectionPool that manages this connection.
     * This is generally useful for meta operations on HostConnectionPool(s) when dealing with their Connection(s)
     * e.g.
     * 1. Return connections safely to the parent pool after executing operations on them
     * 2. Reporting error stats to parent pools when performing ops on connections. The parent pools can then
     *    decide whether the connections should be recycled etc.
     *
     * @return the pool that owns this connection
     */
    public HostConnectionPool<CL> getParentConnectionPool();

    /**
     * Operation to send ping heartbeats on Connection(s).
     * This is primarily used for active monitoring so that stale/bad connections to Hosts can be recycled.
     */
    public void execPing();

    /**
     * Can be used by different layers within dyno to add valuable information to the Connection.
     *
     * @return ConnectionContext holding per-connection metadata
     */
    public ConnectionContext getContext();
}
| 6,015 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/AsyncOperation.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
 * An operation whose execution is asynchronous: running it yields a future
 * that resolves to the result of the execution.
 *
 * @param <CL> client type
 * @param <R>  result type
 * @author poberai
 */
public interface AsyncOperation<CL, R> extends BaseOperation<CL, R> {

    /**
     * Starts the operation against the given client.
     *
     * @param client client to run the operation on
     * @return future resolving to the operation's result
     * @throws DynoException on failure
     */
    ListenableFuture<R> executeAsync(CL client) throws DynoException;
}
| 6,016 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/OperationResult.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * Interface representing the result of executing an {@link Operation}.
 * Setters return {@code this} so calls can be chained.
 *
 * @param <R> type of the result value
 * @author poberai
 */
public interface OperationResult<R> {

    /**
     * @return Get the host on which the operation was performed
     */
    public Host getNode();

    /**
     * Sets the host on which the operation was performed.
     *
     * @param node host that served the operation
     * @return this result, for chaining
     */
    public OperationResult<R> setNode(Host node);

    /**
     * @return Get the result data
     */
    public R getResult();

    /**
     * @return Return the length of time to perform the operation. Does not include
     * connection pool overhead. This time is in nanoseconds
     */
    public long getLatency();

    /**
     * @param units desired time unit for the returned latency
     * @return Return the length of time to perform the operation to the remote service. Does not include
     * connection pool overhead.
     */
    public long getLatency(TimeUnit units);

    /**
     * @return Return the number of times the operation had to be retried. This includes
     * retries for aborted connections.
     */
    public int getAttemptsCount();

    /**
     * Set the number of attempts executing this connection.
     *
     * @param count number of attempts
     * @return this result, for chaining
     */
    public OperationResult<R> setAttemptsCount(int count);

    /**
     * Set latency after executing the operation. This method is useful to apps that do async operations
     * and can proxy back latency stats to Dyno once they receive their result via the future.
     *
     * @param duration time taken by the operation
     * @param unit     unit of {@code duration}
     * @return this result, for chaining
     */
    public OperationResult<R> setLatency(long duration, TimeUnit unit);

    /**
     * Method that returns any other metadata that is associated with this OperationResult.
     *
     * @return Map<String , String> of metadata entries
     */
    public Map<String, String> getMetadata();

    /**
     * Add metadata to the OperationResult. Can be used within different layers of Dyno to add metadata about each layer.
     * Very useful for providing insight into the operation when debugging.
     *
     * @param key   metadata key
     * @param value metadata value
     * @return this result, for chaining
     */
    public OperationResult<R> addMetadata(String key, String value);

    /**
     * Add metadata to the OperationResult. Can be used within different layers of Dyno to add metadata about each layer.
     * Very useful for providing insight into the operation when debugging.
     *
     * @param map metadata entries to add
     * @return this result, for chaining
     */
    public OperationResult<R> addMetadata(Map<String, Object> map);
}
| 6,017 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/HostBuilder.java | package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.impl.utils.ConfigUtils;
import static com.netflix.dyno.connectionpool.Host.DEFAULT_DATASTORE_PORT;
import static com.netflix.dyno.connectionpool.Host.DEFAULT_PORT;
/**
 * Fluent builder for {@link Host} instances. Attributes left unset fall back
 * to the defaults declared on the fields below.
 */
public class HostBuilder {

    // Identity attributes.
    private String hostname;
    private String ipAddress = null;
    private String rack;
    private String datacenter = null;

    // Ports; the secure port defaults to the plain dynomite port.
    private int port = DEFAULT_PORT;
    private int securePort = DEFAULT_PORT;
    private int datastorePort = DEFAULT_DATASTORE_PORT;

    // Remaining attributes.
    private Host.Status status = Host.Status.Down;
    private String hashtag = null;
    private String password = null;

    public HostBuilder setPort(int port) { this.port = port; return this; }

    public HostBuilder setRack(String rack) { this.rack = rack; return this; }

    public HostBuilder setHostname(String hostname) { this.hostname = hostname; return this; }

    public HostBuilder setIpAddress(String ipAddress) { this.ipAddress = ipAddress; return this; }

    public HostBuilder setSecurePort(int securePort) { this.securePort = securePort; return this; }

    public HostBuilder setDatacenter(String datacenter) { this.datacenter = datacenter; return this; }

    public HostBuilder setStatus(Host.Status status) { this.status = status; return this; }

    public HostBuilder setHashtag(String hashtag) { this.hashtag = hashtag; return this; }

    public HostBuilder setPassword(String password) { this.password = password; return this; }

    public HostBuilder setDatastorePort(int datastorePort) { this.datastorePort = datastorePort; return this; }

    /**
     * Builds the {@link Host}. If no datacenter was supplied, it is derived
     * from the rack via {@link ConfigUtils#getDataCenterFromRack(String)}.
     */
    public Host createHost() {
        if (datacenter == null) {
            datacenter = ConfigUtils.getDataCenterFromRack(rack);
        }
        return new Host(hostname, ipAddress, port, securePort, datastorePort, rack, datacenter, status, hashtag, password);
    }
}
| 6,018 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/ConnectionFactory.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.ThrottledException;
/**
 * Factory used to create and open new connections on a host.
 *
 * @param <CL> client type of the connections produced by this factory
 * @author poberai
 */
public interface ConnectionFactory<CL> {

    /**
     * Create a connection for this {@link HostConnectionPool}.
     *
     * @param pool pool that will own the new connection
     * @return the newly created connection
     * @throws DynoConnectException if the connection cannot be created
     * @throws ThrottledException if connection creation is being throttled
     */
    Connection<CL> createConnection(HostConnectionPool<CL> pool) throws DynoConnectException;

    /**
     * Create a connection that targets the underlying datastore.
     * NOTE(review): presumably uses the host's datastore port rather than the
     * dynomite port — confirm against implementations.
     *
     * @param pool pool that will own the new connection
     * @throws DynoConnectException if the connection cannot be created
     */
    Connection<CL> createConnectionWithDataStore(HostConnectionPool<CL> pool)
            throws DynoConnectException;

    /**
     * Create a connection configured with the given consistency level string.
     *
     * @param pool        pool that will own the new connection
     * @param consistency consistency level to apply to the connection
     * @throws DynoConnectException if the connection cannot be created
     */
    Connection<CL> createConnectionWithConsistencyLevel(HostConnectionPool<CL> pool, String consistency)
            throws DynoConnectException;
}
| 6,019 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/ListenableFuture.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
/**
 * A {@link Future} that additionally accepts completion listeners, each paired
 * with the {@link Executor} that should run it.
 *
 * @param <V> result type of the future
 */
public interface ListenableFuture<V> extends Future<V> {

    /**
     * Registers a listener together with the executor that should execute it.
     *
     * @param listener callback to register
     * @param executor executor on which the listener should run
     */
    void addListener(Runnable listener, Executor executor);
}
| 6,020 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/HashPartitioner.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
/**
 * Computes a hash for a given key and maps that hash onto the underlying
 * dynomite server topology (expressed as a collection of {@code HostToken}s).
 * Implementations decide both the hash function and the hash-to-token mapping.
 *
 * @author poberai
 */
public interface HashPartitioner {

    /** @return hash of the given int key */
    Long hash(int key);

    /** @return hash of the given long key */
    Long hash(long key);

    /** @return hash of the given String key */
    Long hash(String key);

    /** @return hash of the given binary key */
    Long hash(byte[] key);

    /**
     * Maps a key hash to its owning token.
     *
     * @param keyHash hash previously produced by one of the hash methods
     * @return the {@link HostToken} that owns the hash
     */
    HostToken getToken(Long keyHash);
}
| 6,021 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/Host.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import org.apache.commons.lang3.StringUtils;
import java.net.InetSocketAddress;
import java.util.Objects;
/**
 * Class encapsulating information about a host.
 * <p>
 * This is immutable except for the host status and hashtag. Note that the
 * HostSupplier may not know the Dynomite port, whereas the Host object created
 * by the load balancer may receive the port via the cluster_describe REST
 * call. Hence, we must not use the port in the equality and hashCode
 * calculations.
 *
 * @author poberai
 * @author ipapapa
 */
public class Host implements Comparable<Host> {

    // Default dynomite server port.
    public static final int DEFAULT_PORT = 8102;
    // Default port of the datastore sitting behind dynomite.
    public static final int DEFAULT_DATASTORE_PORT = 22122;

    // Sentinel used where no real host applies.
    public static final Host NO_HOST = new HostBuilder().setHostname("UNKNOWN").setIpAddress("UNKNOWN").setPort(0)
            .setRack("UNKNOWN").createHost();

    private final String hostname;
    private final String ipAddress;
    private final int port;
    private final int securePort;
    private final int datastorePort;
    private final InetSocketAddress socketAddress;
    private final String rack;
    private final String datacenter;
    // Mutable: may be set after construction via setHashtag().
    private String hashtag;
    // Mutable: updated via setStatus(); hosts start out Down.
    private Status status = Status.Down;
    private final String password;

    public enum Status {
        Up, Down;
    }

    /**
     * Creates a host. An empty password is normalized to {@code null}. A port
     * of -1 skips socket-address creation (used by unit tests to avoid host
     * name resolution).
     */
    public Host(String hostname, String ipAddress, int port, int securePort, int datastorePort, String rack, String datacenter, Status status, String hashtag, String password) {
        this.hostname = hostname;
        this.ipAddress = ipAddress;
        this.port = port;
        this.securePort = securePort;
        this.datastorePort = datastorePort;
        this.rack = rack;
        this.status = status;
        this.datacenter = datacenter;
        this.hashtag = hashtag;
        this.password = StringUtils.isEmpty(password) ? null : password;
        // Used for the unit tests to prevent host name resolution
        if (port != -1) {
            this.socketAddress = new InetSocketAddress(hostname, port);
        } else {
            this.socketAddress = null;
        }
    }

    /**
     * @return the ip address when known, otherwise the hostname
     */
    public String getHostAddress() {
        if (this.ipAddress != null) {
            return ipAddress;
        }
        return hostname;
    }

    public String getHostName() {
        return hostname;
    }

    public String getIpAddress() {
        return ipAddress;
    }

    public int getPort() {
        return port;
    }

    public int getSecurePort() {
        return securePort;
    }

    public int getDatastorePort() {
        return datastorePort;
    }

    public String getDatacenter() {
        return datacenter;
    }

    public String getRack() {
        return rack;
    }

    public String getHashtag() {
        return hashtag;
    }

    public void setHashtag(String hashtag) {
        this.hashtag = hashtag;
    }

    /**
     * Updates the host status.
     *
     * @return this host, for chaining
     */
    public Host setStatus(Status condition) {
        status = condition;
        return this;
    }

    public boolean isUp() {
        return status == Status.Up;
    }

    public String getPassword() {
        return password;
    }

    public Status getStatus() {
        return status;
    }

    /**
     * @return socket address for the dynomite port, or {@code null} when the
     *         host was built with port -1 (unit tests)
     */
    public InetSocketAddress getSocketAddress() {
        return socketAddress;
    }

    /**
     * Equality checks will fail in collections between Host objects created
     * from the HostSupplier, which may not know the Dynomite port, and the Host
     * objects created by the token map supplier.
     * Only hostname and rack participate; ports are deliberately excluded
     * (see class javadoc).
     */
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((hostname == null) ? 0 : hostname.hashCode());
        result = prime * result + ((rack == null) ? 0 : rack.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        Host other = (Host) obj;
        boolean equals = true;
        // Same fields as hashCode(): hostname and rack only.
        equals &= (hostname != null) ? hostname.equals(other.hostname) : other.hostname == null;
        equals &= (rack != null) ? rack.equals(other.rack) : other.rack == null;
        return equals;
    }

    // Orders by hostname, then rack — the same fields used by equals().
    @Override
    public int compareTo(Host o) {
        int compared = this.hostname.compareTo(o.hostname);
        if (compared != 0) {
            return compared;
        }
        return this.rack.compareTo(o.rack);
    }

    @Override
    public String toString() {
        // Password is never printed; only whether one is set.
        return "Host [hostname=" + hostname + ", ipAddress=" + ipAddress + ", port=" + port + ", rack: "
                + rack + ", datacenter: " + datacenter + ", status: " + status.name() + ", hashtag="
                + hashtag + ", password=" + (Objects.nonNull(password) ? "masked" : "null") + "]";
    }

    /**
     * Copy factory: builds a new Host carrying the same attributes as the
     * given one.
     */
    public static Host clone(Host host) {
        return new HostBuilder().setHostname(host.getHostName())
                .setIpAddress(host.getIpAddress()).setPort(host.getPort())
                .setSecurePort(host.getSecurePort())
                .setRack(host.getRack())
                .setDatastorePort(host.getDatastorePort())
                .setDatacenter(host.getDatacenter()).setStatus(host.getStatus())
                .setHashtag(host.getHashtag())
                .setPassword(host.getPassword()).createHost();
    }
}
| 6,022 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/BaseOperation.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
/**
 * Base contract for any operation that can be performed using a connection
 * obtained from the connection pool.
 *
 * @param <CL> client
 * @param <R>  result
 * @author poberai
 * @author ipapapa
 */
public interface BaseOperation<CL, R> {

    /**
     * Name of the operation, used for tracking metrics etc.
     *
     * @return String
     */
    String getName();

    /**
     * Key for the operation; useful for implementing token aware routing.
     *
     * @return String
     */
    String getStringKey();

    /**
     * Binary key for the operation.
     *
     * @return byte[]
     */
    byte[] getBinaryKey();
}
| 6,023 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/ConnectionContext.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.Map;
/**
 * Context specific to a connection. Makes it possible to stash connection
 * specific state such as prepared CQL statement ids etc.
 *
 * @author poberai
 */
public interface ConnectionContext {

    /**
     * Stores metadata under the given key.
     *
     * @param key metadata key
     * @param obj value to store
     */
    void setMetadata(String key, Object obj);

    /**
     * @param key metadata key
     * @return value previously stored via {@link #setMetadata(String, Object)}
     */
    Object getMetadata(String key);

    /**
     * @param key metadata key
     * @return true if metadata exists under the given key
     */
    boolean hasMetadata(String key);

    /**
     * Clears all metadata from this context.
     */
    void reset();

    /**
     * Returns the entire context.
     *
     * @return Map<String , Object> of all stored metadata
     */
    Map<String, Object> getAll();
}
| 6,024 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/DecoratingFuture.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Adapts a {@link ListenableFuture} of {@link OperationResult} into a plain
 * {@link Future} over the bare result value: all Future calls are delegated,
 * and {@code get(...)} unwraps the {@link OperationResult}.
 *
 * @param <V> type of the unwrapped result value
 */
public class DecoratingFuture<V> implements Future<V> {

    // Underlying future holding the wrapped operation result.
    private final ListenableFuture<OperationResult<V>> delegate;

    public DecoratingFuture(final ListenableFuture<OperationResult<V>> listenableFuture) {
        delegate = listenableFuture;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        return delegate.cancel(mayInterruptIfRunning);
    }

    @Override
    public boolean isCancelled() {
        return delegate.isCancelled();
    }

    @Override
    public boolean isDone() {
        return delegate.isDone();
    }

    @Override
    public V get() throws InterruptedException, ExecutionException {
        return delegate.get().getResult();
    }

    @Override
    public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        return delegate.get(timeout, unit).getResult();
    }
}
| 6,025 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/HealthTracker.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
 * Base interface for classes that track error rates for the connection pool.
 *
 * @param <CL> the client type
 */
public interface HealthTracker<CL> {

    /**
     * Records a connection error observed on the given host pool so the
     * tracker can account for it in the pool's error rate.
     *
     * @param hostPool pool on which the error occurred
     * @param e        the exception that was raised
     */
    void trackConnectionError(HostConnectionPool<CL> hostPool, DynoException e);
}
| 6,026 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/OperationMonitor.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool;
import java.util.concurrent.TimeUnit;
/**
 * Interface for recording high level stats for operation executions.
 * Includes latency, success and failures.
 *
 * @author poberai
 */
public interface OperationMonitor {

    /**
     * Record latency for the operation.
     *
     * @param opName   name of the operation being tracked
     * @param duration time taken by the operation
     * @param unit     unit of {@code duration}
     */
    void recordLatency(String opName, long duration, TimeUnit unit);

    /**
     * Record success for the operation.
     *
     * @param opName name of the operation being tracked
     */
    void recordSuccess(String opName);

    /**
     * Record success while specifying if compression has been enabled for
     * the operation. Note that this means that either the value was
     * compressed or decompressed during the operation. If compression has been
     * enabled but the value does not reach the threshold then this will be false.
     *
     * @param opName             name of the operation being tracked
     * @param compressionEnabled whether compression was actually applied
     */
    void recordSuccess(String opName, boolean compressionEnabled);

    /**
     * Record failure for the operation.
     *
     * @param opName name of the operation being tracked
     * @param reason short description of the failure cause
     */
    void recordFailure(String opName, String reason);

    /**
     * Record failure, also noting whether compression was in effect for the
     * operation (see {@link #recordSuccess(String, boolean)}).
     */
    void recordFailure(String opName, boolean compressionEnabled, String reason);
}
| 6,027 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/HostSelectionStrategy.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.BaseOperation;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * Interface that encapsulates a strategy for selecting a {@link Connection} to a {@link Host} for the given {@link BaseOperation}
 *
 * @param <CL> client type of the pooled connections
 * @author poberai
 */
public interface HostSelectionStrategy<CL> {

    /**
     * Selects the pool whose host should serve the given operation.
     *
     * @param op      operation being routed
     * @param hashtag hashtag used when deriving the routing key
     *                (NOTE(review): exact semantics depend on the
     *                implementation — confirm)
     * @return the selected host pool
     * @throws NoAvailableHostsException if no host can serve the operation
     */
    HostConnectionPool<CL> getPoolForOperation(BaseOperation<CL, ?> op, String hashtag)
            throws NoAvailableHostsException;

    /**
     * Selects a pool for each operation in the batch.
     *
     * @param ops operations being routed
     * @return mapping of selected pool to the operation it should serve
     * @throws NoAvailableHostsException if no host can serve an operation
     */
    Map<HostConnectionPool<CL>, BaseOperation<CL, ?>> getPoolsForOperationBatch(Collection<BaseOperation<CL, ?>> ops) throws NoAvailableHostsException;

    /**
     * @return the host pools known to this strategy, in strategy-defined order
     */
    List<HostConnectionPool<CL>> getOrderedHostPools();

    /**
     * @param token token whose owning pool is wanted
     * @return the pool for the given token
     */
    HostConnectionPool<CL> getPoolForToken(Long token);

    /**
     * @param start start of the token range
     * @param end   end of the token range
     * @return pools for the tokens in the given range
     */
    List<HostConnectionPool<CL>> getPoolsForTokens(Long start, Long end);

    /**
     * Finds the server Host that owns the specified key.
     *
     * @param key routing key
     * @return {@link HostToken}
     * @throws UnsupportedOperationException for non-token aware load balancing strategies
     */
    HostToken getTokenForKey(String key) throws UnsupportedOperationException;

    /**
     * Finds the server Host that owns the specified binary key.
     *
     * @param key binary routing key
     * @return {@link HostToken}
     * @throws UnsupportedOperationException for non-token aware load balancing strategies
     */
    HostToken getTokenForKey(byte[] key) throws UnsupportedOperationException;

    /**
     * Init the connection pool with the set of hosts provided.
     *
     * @param hostPools mapping of host token to the pool for that host
     */
    void initWithHosts(Map<HostToken, HostConnectionPool<CL>> hostPools);

    /**
     * Add a host to the selection strategy. This is useful when the underlying dynomite topology changes.
     *
     * @param host     {@link com.netflix.dyno.connectionpool.impl.lb.HostToken} of the host being added
     * @param hostPool pool for the new host
     * @return true/false indicating whether the pool was indeed added
     */
    boolean addHostPool(HostToken host, HostConnectionPool<CL> hostPool);

    /**
     * Remove a host from the selection strategy. This is useful when the underlying dynomite topology changes.
     *
     * @param host {@link com.netflix.dyno.connectionpool.impl.lb.HostToken} of the host being removed
     * @return true/false indicating whether the pool was indeed removed
     */
    boolean removeHostPool(HostToken host);

    // Whether this strategy performs token aware routing (see getTokenForKey).
    boolean isTokenAware();

    // Whether this strategy currently has no host pools registered.
    boolean isEmpty();

    interface HostSelectionStrategyFactory<CL> {

        /**
         * Create/Return a HostSelectionStrategy
         *
         * @return HostSelectionStrategy
         */
        public HostSelectionStrategy<CL> vendPoolSelectionStrategy();
    }
}
| 6,028 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/ConnectionPoolImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.FatalConnectionException;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.exception.PoolExhaustedException;
import com.netflix.dyno.connectionpool.impl.HostConnectionPoolFactory.Type;
import com.netflix.dyno.connectionpool.impl.health.ConnectionPoolHealthTracker;
import com.netflix.dyno.connectionpool.impl.lb.HostSelectionWithFallback;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils.Predicate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Main implementation class for {@link ConnectionPool} The pool deals with a
* bunch of other components and brings together all the functionality for Dyno.
* Hence this is where all the action happens.
* <p>
* Here are the top salient features of this class.
* <p>
* 1. Manages a collection of {@link HostConnectionPool}s for all the
* {@link Host}s that it receives from the {@link HostSupplier}
* <p>
* 2. Manages adding and removing hosts as dictated by the HostSupplier.
* <p>
* 3. Enables execution of {@link Operation} using a {@link Connection} borrowed
* from the {@link HostConnectionPool}s
* <p>
* 4. Employs a {@link HostSelectionStrategy} (basically Round Robin or Token
* Aware) when executing operations
* <p>
* 5. Uses a health check monitor for tracking error rates from the execution of
* operations. The health check monitor can then decide to recycle a given
* HostConnectionPool, and execute requests using fallback HostConnectionPools
* for remote DCs.
* <p>
* 6. Uses {@link RetryPolicy} when executing operations for better resilience
* against transient failures.
*
* @param <CL>
* @author poberai
* @see {@link HostSupplier} {@link Host} {@link HostSelectionStrategy}
* @see {@link Connection} {@link ConnectionFactory}
* {@link ConnectionPoolConfiguration} {@link ConnectionPoolMonitor}
* @see {@link ConnectionPoolHealthTracker}
*/
public class ConnectionPoolImpl<CL> implements ConnectionPool<CL>, TopologyView {

    private static final Logger Logger = LoggerFactory.getLogger(ConnectionPoolImpl.class);

    // Authoritative registry of per-host connection pools managed by this class.
    private final ConcurrentHashMap<Host, HostConnectionPool<CL>> cpMap = new ConcurrentHashMap<Host, HostConnectionPool<CL>>();
    // Tracks per-pool error rates and decides when a pool should be recycled.
    private final ConnectionPoolHealthTracker<CL> cpHealthTracker;
    // Creates sync or async host pools depending on the pool Type (see constructor).
    private final HostConnectionPoolFactory<CL> hostConnPoolFactory;
    private final ConnectionFactory<CL> connFactory;
    private final ConnectionPoolConfiguration cpConfiguration;
    private final ConnectionPoolMonitor cpMonitor;
    // Single thread that polls for hosts while the pool is idling (see idle()).
    private final ScheduledExecutorService idleThreadPool = Executors.newSingleThreadScheduledExecutor();
    private final HostsUpdater hostsUpdater;
    // Runs the periodic host-list refresh and health-tracker work once started (see start()).
    private final ScheduledExecutorService connPoolThreadPool = Executors.newScheduledThreadPool(1);
    // Lifecycle flags: the pool is either idling (no hosts yet) or started; start() flips both.
    private final AtomicBoolean started = new AtomicBoolean(false);
    private final AtomicBoolean idling = new AtomicBoolean(false);
    // Load balancer (round robin / token aware) with remote-rack fallback; assigned in start().
    private HostSelectionWithFallback<CL> selectionStrategy;
    private Type poolType;

    /** Creates a connection pool with the default synchronous host-pool type. */
    public ConnectionPoolImpl(ConnectionFactory<CL> cFactory, ConnectionPoolConfiguration cpConfig,
                              ConnectionPoolMonitor cpMon) {
        this(cFactory, cpConfig, cpMon, Type.Sync);
    }

    /**
     * Creates a connection pool.
     *
     * @param cFactory factory used to open individual connections to hosts
     * @param cpConfig pool configuration (timeouts, retry policy, host/token suppliers, ...)
     * @param cpMon    monitor that records pool metrics
     * @param type     Sync or Async; chooses the HostConnectionPool implementation
     */
    public ConnectionPoolImpl(ConnectionFactory<CL> cFactory, ConnectionPoolConfiguration cpConfig,
                              ConnectionPoolMonitor cpMon, Type type) {
        this.connFactory = cFactory;
        this.cpConfiguration = cpConfig;
        this.cpMonitor = cpMon;
        this.poolType = type;
        this.cpHealthTracker = new ConnectionPoolHealthTracker<CL>(cpConfiguration, connPoolThreadPool);
        switch (type) {
            case Sync:
                hostConnPoolFactory = new SyncHostConnectionPoolFactory();
                break;
            case Async:
                hostConnPoolFactory = new AsyncHostConnectionPoolFactory();
                break;
            default:
                throw new RuntimeException("unknown type");
        }
        this.hostsUpdater = new HostsUpdater(cpConfiguration.getHostSupplier(), cpConfiguration.getTokenSupplier());
    }

    /** @return the configured name of this pool. */
    public String getName() {
        return cpConfiguration.getName();
    }

    /** @return the monitor recording this pool's metrics. */
    public ConnectionPoolMonitor getMonitor() {
        return cpMonitor;
    }

    /** @return the health tracker watching the per-host pools. */
    public ConnectionPoolHealthTracker<CL> getHealthTracker() {
        return cpHealthTracker;
    }

    /** @return true while the pool is in the idle state waiting for hosts to appear. */
    @Override
    public boolean isIdle() {
        return idling.get();
    }

    /**
     * Adds a host and refreshes the load balancer.
     *
     * @return true if a new pool was created and primed with at least one connection
     */
    @Override
    public boolean addHost(Host host) {
        return addHost(host, true);
    }

    /**
     * Creates, registers and primes a HostConnectionPool for the given host.
     *
     * @param host                the host to add
     * @param refreshLoadBalancer if true, the selection strategy is updated; start() passes
     *                            false because it initializes the strategy once afterwards
     * @return true if at least one connection was primed; false if the host already existed
     *         or priming failed (in which case the pool is removed again)
     */
    public boolean addHost(Host host, boolean refreshLoadBalancer) {
        //host.setPort(cpConfiguration.getPort());
        HostConnectionPool<CL> connPool = cpMap.get(host);
        if (connPool != null) {
            if (Logger.isDebugEnabled()) {
                Logger.debug("HostConnectionPool already exists for host: " + host + ", ignoring addHost");
            }
            return false;
        }
        final HostConnectionPool<CL> hostPool = hostConnPoolFactory.createHostConnectionPool(host, this);
        // putIfAbsent guards against two threads adding the same host concurrently.
        HostConnectionPool<CL> prevPool = cpMap.putIfAbsent(host, hostPool);
        if (prevPool == null) {
            // This is the first time we are adding this pool.
            Logger.info("Adding host connection pool for host: " + host);
            try {
                int primed = hostPool.primeConnections();
                Logger.info("Successfully primed " + primed + " of " + cpConfiguration.getMaxConnsPerHost() + " to "
                        + host);
                if (hostPool.isActive()) {
                    if (refreshLoadBalancer) {
                        selectionStrategy.addHost(host, hostPool);
                    }
                    cpHealthTracker.initializePingHealthchecksForPool(hostPool);
                    cpMonitor.hostAdded(host, hostPool);
                } else {
                    Logger.info(
                            "Failed to prime enough connections to host " + host + " for it take traffic; will retry");
                    cpMap.remove(host);
                }
                return primed > 0;
            } catch (DynoException e) {
                Logger.error("Failed to init host pool for host: " + host, e);
                cpMap.remove(host);
                return false;
            }
        } else {
            return false;
        }
    }

    /**
     * Removes the host from the selection strategy, health tracker and monitor, then shuts
     * down its pool if one was registered.
     *
     * @return true if a pool was found in cpMap and shut down
     */
    @Override
    public boolean removeHost(Host host) {
        Logger.info(String.format("Removing host %s from selectionStrategy, cpHealthTracker, cpMonitor",
                host.getHostAddress()));
        // Since there are multiple data structures of host, token, connection pool etc, call removehost even
        // if it is not found in the cpMap
        selectionStrategy.removeHost(host);
        cpHealthTracker.removeHost(host);
        cpMonitor.hostRemoved(host);
        HostConnectionPool<CL> hostPool = cpMap.remove(host);
        if (hostPool != null) {
            hostPool.shutdown();
            Logger.info(String.format("Remove host: Successfully removed hostPool for host %s from connection pool",
                    host.getHostAddress()));
            return true;
        } else {
            Logger.info(String.format("Remove host: Host pool for host %s NOT FOUND in the connection pool", host.getHostAddress()));
            return false;
        }
    }

    /** @return true if the host has a registered pool and that pool reports itself active. */
    @Override
    public boolean isHostUp(Host host) {
        HostConnectionPool<CL> hostPool = cpMap.get(host);
        return (hostPool != null) ? hostPool.isActive() : false;
    }

    /** @return true if a pool (active or not) is registered for the host. */
    @Override
    public boolean hasHost(Host host) {
        return cpMap.get(host) != null;
    }

    /** @return a snapshot list of only the pools currently reporting active. */
    @Override
    public List<HostConnectionPool<CL>> getActivePools() {
        return new ArrayList<HostConnectionPool<CL>>(
                CollectionUtils.filter(getPools(), new Predicate<HostConnectionPool<CL>>() {
                    @Override
                    public boolean apply(HostConnectionPool<CL> hostPool) {
                        if (hostPool == null) {
                            return false;
                        }
                        return hostPool.isActive();
                    }
                }));
    }

    /** @return a snapshot list of all registered pools. */
    @Override
    public List<HostConnectionPool<CL>> getPools() {
        return new ArrayList<HostConnectionPool<CL>>(cpMap.values());
    }

    /**
     * Adds all hosts in hostsUp and removes all hosts in hostsDown.
     *
     * @return an already-completed future whose value is true if any add or remove took effect
     */
    @Override
    public Future<Boolean> updateHosts(Collection<Host> hostsUp, Collection<Host> hostsDown) {
        Logger.debug(String.format("Updating hosts: UP=%s, DOWN=%s", hostsUp, hostsDown));
        boolean condition = false;
        if (hostsUp != null && !hostsUp.isEmpty()) {
            for (Host hostUp : hostsUp) {
                condition |= addHost(hostUp);
            }
        }
        if (hostsDown != null && !hostsDown.isEmpty()) {
            for (Host hostDown : hostsDown) {
                condition |= removeHost(hostDown);
            }
        }
        return getEmptyFutureTask(condition);
    }

    /** @return the registered pool for the host, or null if none. */
    @Override
    public HostConnectionPool<CL> getHostPool(Host host) {
        return cpMap.get(host);
    }

    /**
     * Executes the operation on a connection chosen by the selection strategy, retrying per
     * the configured RetryPolicy and falling back to other hosts on failure.
     * <p>
     * Error accounting: NoAvailableHostsException aborts immediately; PoolExhaustedException
     * is recorded against the exhausted pool's health but does not consume a retry-policy
     * failure; any other DynoException counts as a retry failure. Connections are always
     * returned (or recycled, on FatalConnectionException) in the finally block.
     *
     * @throws DynoException the last failure once the retry policy gives up
     */
    @Override
    public <R> OperationResult<R> executeWithFailover(Operation<CL, R> op) throws DynoException {
        // Start recording the operation
        long startTime = System.currentTimeMillis();
        RetryPolicy retry = cpConfiguration.getRetryPolicyFactory().getRetryPolicy();
        retry.begin();
        DynoException lastException = null;
        do {
            Connection<CL> connection = null;
            try {
                connection = selectionStrategy.getConnectionUsingRetryPolicy(op,
                        cpConfiguration.getMaxTimeoutWhenExhausted(), TimeUnit.MILLISECONDS, retry);
                updateConnectionContext(connection.getContext(), connection.getHost());
                OperationResult<R> result = connection.execute(op);
                // Add context to the result from the successful execution
                result.setNode(connection.getHost()).addMetadata(connection.getContext().getAll());
                retry.success();
                cpMonitor.incOperationSuccess(connection.getHost(), System.currentTimeMillis() - startTime);
                return result;
            } catch (NoAvailableHostsException e) {
                cpMonitor.incOperationFailure(null, e);
                throw e;
            } catch (PoolExhaustedException e) {
                Logger.warn("Pool exhausted: " + e.getMessage());
                cpMonitor.incOperationFailure(null, e);
                cpHealthTracker.trackConnectionError(e.getHostConnectionPool(), e);
            } catch (DynoException e) {
                retry.failure(e);
                lastException = e;
                if (connection != null) {
                    cpMonitor.incOperationFailure(connection.getHost(), e);
                    if (retry.allowRetry()) {
                        cpMonitor.incFailover(connection.getHost(), e);
                    }
                    // Track the connection health so that the pool can be
                    // purged at a later point
                    cpHealthTracker.trackConnectionError(connection.getParentConnectionPool(), lastException);
                } else {
                    cpMonitor.incOperationFailure(null, e);
                }
            } catch (Throwable t) {
                throw new RuntimeException(t);
            } finally {
                if (connection != null) {
                    if (connection.getLastException() != null
                            && connection.getLastException() instanceof FatalConnectionException) {
                        Logger.warn("Received FatalConnectionException; closing connection "
                                + connection.getContext().getAll() + " to host "
                                + connection.getParentConnectionPool().getHost());
                        connection.getParentConnectionPool().recycleConnection(connection);
                        // note - don't increment connection closed metric here;
                        // it's done in closeConnection
                    } else {
                        connection.getContext().reset();
                        connection.getParentConnectionPool().returnConnection(connection);
                    }
                }
            }
        } while (retry.allowRetry());
        throw lastException;
    }

    /**
     * Executes the operation once per connection across the ring (one connection per owning
     * host, as chosen by the selection strategy), collecting one result per host.
     * <p>
     * A partial failure fails the whole call: any remaining un-executed connections are
     * drained and returned in the finally block, and the last DynoException is rethrown.
     *
     * @param tokenRackMapper maps tokens to racks when selecting the ring connections
     * @throws DynoException if any per-host execution ultimately failed
     */
    @Override
    public <R> Collection<OperationResult<R>> executeWithRing(TokenRackMapper tokenRackMapper, Operation<CL, R> op) throws DynoException {
        // Start recording the operation
        long startTime = System.currentTimeMillis();
        Collection<Connection<CL>> connections = selectionStrategy
                .getConnectionsToRing(tokenRackMapper, cpConfiguration.getMaxTimeoutWhenExhausted(), TimeUnit.MILLISECONDS);
        LinkedBlockingQueue<Connection<CL>> connQueue = new LinkedBlockingQueue<Connection<CL>>();
        connQueue.addAll(connections);
        List<OperationResult<R>> results = new ArrayList<OperationResult<R>>();
        DynoException lastException = null;
        try {
            while (!connQueue.isEmpty()) {
                Connection<CL> connection = connQueue.poll();
                // Each host gets its own fresh retry policy.
                RetryPolicy retry = cpConfiguration.getRetryPolicyFactory().getRetryPolicy();
                retry.begin();
                do {
                    try {
                        updateConnectionContext(connection.getContext(), connection.getHost());
                        OperationResult<R> result = connection.execute(op);
                        // Add context to the result from the successful
                        // execution
                        result.setNode(connection.getHost()).addMetadata(connection.getContext().getAll());
                        retry.success();
                        cpMonitor.incOperationSuccess(connection.getHost(), System.currentTimeMillis() - startTime);
                        results.add(result);
                    } catch (NoAvailableHostsException e) {
                        cpMonitor.incOperationFailure(null, e);
                        throw e;
                    } catch (DynoException e) {
                        retry.failure(e);
                        lastException = e;
                        cpMonitor.incOperationFailure(connection != null ? connection.getHost() : null, e);
                        // Track the connection health so that the pool can be
                        // purged at a later point
                        if (connection != null) {
                            cpHealthTracker.trackConnectionError(connection.getParentConnectionPool(), lastException);
                        }
                    } catch (Throwable t) {
                        throw new RuntimeException(t);
                    } finally {
                        connection.getContext().reset();
                        connection.getParentConnectionPool().returnConnection(connection);
                    }
                } while (retry.allowRetry());
            }
            // we fail the entire operation on a partial failure. hence need to
            // clean up the rest of the pending connections
        } finally {
            List<Connection<CL>> remainingConns = new ArrayList<Connection<CL>>();
            connQueue.drainTo(remainingConns);
            for (Connection<CL> connectionToClose : remainingConns) {
                try {
                    connectionToClose.getContext().reset();
                    connectionToClose.getParentConnectionPool().returnConnection(connectionToClose);
                } catch (Throwable t) {
                    // NOTE(review): best-effort cleanup; errors while returning connections
                    // are deliberately ignored so the original exception propagates.
                }
            }
        }
        if (lastException != null) {
            throw lastException;
        } else {
            return results;
        }
    }

    // Stamps the target host/port metadata onto the connection context so it is carried
    // into the OperationResult of a successful execution.
    private void updateConnectionContext(ConnectionContext context, Host host) {
        context.setMetadata("host", host.getHostAddress());
        context.setMetadata("port", host.getPort());
        context.setMetadata("datastorePort", host.getDatastorePort());
    }

    /**
     * Use with EXTREME CAUTION. Connection that is borrowed must be returned,
     * else we will have connection pool exhaustion
     *
     * @param baseOperation operation used by the selection strategy to route the borrow
     * @return a borrowed connection; the caller MUST return it to its parent pool
     */
    public <R> Connection<CL> getConnectionForOperation(BaseOperation<CL, R> baseOperation) {
        return selectionStrategy.getConnection(baseOperation, cpConfiguration.getMaxTimeoutWhenExhausted(),
                TimeUnit.MILLISECONDS);
    }

    /**
     * Shuts down every host pool, the health tracker, the hosts updater and the background
     * executor, and deregisters the monitor MBean. No-op if the pool was never started.
     */
    @Override
    public void shutdown() {
        if (started.get()) {
            for (Host host : cpMap.keySet()) {
                removeHost(host);
            }
            cpHealthTracker.stop();
            hostsUpdater.stop();
            connPoolThreadPool.shutdownNow();
            deregisterMonitorConsoleMBean();
        }
    }

    /**
     * Starts the pool: fetches the active hosts, primes a pool per host in parallel, then
     * initializes the selection strategy, health tracker, the periodic host-refresh task and
     * the monitor MBean. Idempotent: a second call returns a completed false future.
     *
     * @throws DynoException if no HostSupplier is configured
     * @throws NoAvailableHostsException if the supplier returns no active hosts
     */
    @Override
    public Future<Boolean> start() throws DynoException {
        if (started.get()) {
            return getEmptyFutureTask(false);
        }
        HostSupplier hostSupplier = cpConfiguration.getHostSupplier();
        if (hostSupplier == null) {
            throw new DynoException("Host supplier not configured!");
        }
        HostStatusTracker hostStatus = hostsUpdater.refreshHosts();
        cpMonitor.setHostCount(hostStatus.getHostCount());
        Collection<Host> hostsUp = hostStatus.getActiveHosts();
        if (hostsUp == null || hostsUp.isEmpty()) {
            throw new NoAvailableHostsException("No available hosts when starting connection pool");
        }
        // Prime host pools in parallel; the load balancer is initialized once afterwards.
        final ExecutorService threadPool = Executors.newFixedThreadPool(Math.max(10, hostsUp.size()));
        final List<Future<Void>> futures = new ArrayList<Future<Void>>();
        for (final Host host : hostsUp) {
            // Add host connection pool, but don't init the load balancer yet
            futures.add(threadPool.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    addHost(host, false);
                    return null;
                }
            }));
        }
        try {
            for (Future<Void> future : futures) {
                try {
                    future.get();
                } catch (InterruptedException e) {
                    // do nothing
                    // NOTE(review): swallows the interrupt without re-interrupting the thread;
                    // consider Thread.currentThread().interrupt() — confirm intent.
                } catch (ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        } finally {
            threadPool.shutdownNow();
        }
        boolean success = started.compareAndSet(false, true);
        if (success) {
            // Leaving idle mode: stop the idle poller before wiring up the running pool.
            idling.set(false);
            idleThreadPool.shutdownNow();
            selectionStrategy = initSelectionStrategy();
            cpHealthTracker.start();
            connPoolThreadPool.scheduleWithFixedDelay(new Runnable() {
                @Override
                public void run() {
                    try {
                        HostStatusTracker hostStatus = hostsUpdater.refreshHosts();
                        cpMonitor.setHostCount(hostStatus.getHostCount());
                        Logger.debug(hostStatus.toString());
                        updateHosts(hostStatus.getActiveHosts(), hostStatus.getInactiveHosts());
                    } catch (Throwable throwable) {
                        Logger.error("Failed to update hosts cache", throwable);
                    }
                }
            }, 15 * 1000, 30 * 1000, TimeUnit.MILLISECONDS);
            MonitorConsole.getInstance().registerConnectionPool(this);
            registerMonitorConsoleMBean(MonitorConsole.getInstance());
        }
        return getEmptyFutureTask(true);
    }

    /**
     * Puts the pool in idle mode: a background task polls the HostSupplier every minute and
     * calls start() as soon as at least one active host appears. Must be called before
     * start(); calling it on a started pool throws IllegalStateException.
     */
    @Override
    public void idle() {
        if (this.started.get()) {
            throw new IllegalStateException("Cannot move from started to idle once the pool has been started");
        }
        if (idling.compareAndSet(false, true)) {
            idleThreadPool.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    if (!started.get()) {
                        try {
                            HostStatusTracker hostStatus = hostsUpdater.refreshHosts();
                            cpMonitor.setHostCount(hostStatus.getHostCount());
                            Collection<Host> hostsUp = hostStatus.getActiveHosts();
                            if (hostsUp.size() > 0) {
                                Logger.debug("Found hosts while IDLING; starting the connection pool");
                                start().get();
                            }
                        } catch (NoAvailableHostsException nah) {
                            Logger.debug("No hosts found, will continue IDLING");
                        } catch (DynoException de) {
                            Logger.warn("Attempt to start connection pool FAILED", de);
                        } catch (Exception e) {
                            Logger.warn("Attempt to start connection pool FAILED", e);
                        }
                    }
                }
            }, 30, 60, TimeUnit.SECONDS);
        }
    }

    /** @return the configuration this pool was built with. */
    @Override
    public ConnectionPoolConfiguration getConfiguration() {
        return cpConfiguration;
    }

    // Registers the MonitorConsole MBean on the platform MBean server; logs (but does not
    // propagate) any JMX registration failure.
    private void registerMonitorConsoleMBean(MonitorConsoleMBean bean) {
        final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        try {
            ObjectName objectName = new ObjectName(MonitorConsole.OBJECT_NAME);
            if (!server.isRegistered(objectName)) {
                server.registerMBean(bean, objectName);
                Logger.info("registered mbean " + objectName);
            } else {
                Logger.info("mbean " + objectName + " has already been registered !");
            }
        } catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException
                | NotCompliantMBeanException ex) {
            Logger.error("Unable to register MonitorConsole mbean ", ex);
        }
    }

    // Counterpart of registerMonitorConsoleMBean, called from shutdown().
    private void deregisterMonitorConsoleMBean() {
        final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        try {
            ObjectName objectName = new ObjectName(MonitorConsole.OBJECT_NAME);
            if (server.isRegistered(objectName)) {
                server.unregisterMBean(objectName);
                Logger.info("deregistered mbean " + objectName);
            }
        } catch (MalformedObjectNameException | MBeanRegistrationException | InstanceNotFoundException ex) {
            Logger.error("Unable to deregister MonitorConsole mbean ", ex);
        }
    }

    // Builds the load balancer from the current cpMap contents. Requires a TokenMapSupplier.
    private HostSelectionWithFallback<CL> initSelectionStrategy() {
        if (cpConfiguration.getTokenSupplier() == null) {
            throw new RuntimeException("TokenMapSupplier not configured");
        }
        HostSelectionWithFallback<CL> selection = new HostSelectionWithFallback<CL>(cpConfiguration, cpMonitor);
        selection.initWithHosts(cpMap);
        return selection;
    }

    // Wraps a constant boolean in an already-completed Future (run() is invoked eagerly).
    private Future<Boolean> getEmptyFutureTask(final Boolean condition) {
        FutureTask<Boolean> future = new FutureTask<Boolean>(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                return condition;
            }
        });
        future.run();
        return future;
    }

    /** Factory producing blocking (synchronous) host connection pools. */
    private class SyncHostConnectionPoolFactory implements HostConnectionPoolFactory<CL> {
        @Override
        public HostConnectionPool<CL> createHostConnectionPool(Host host, ConnectionPoolImpl<CL> parentPoolImpl) {
            return new HostConnectionPoolImpl<CL>(host, connFactory, cpConfiguration, cpMonitor);
        }
    }

    /** Factory producing non-blocking (asynchronous) host connection pools. */
    private class AsyncHostConnectionPoolFactory implements HostConnectionPoolFactory<CL> {
        @Override
        public HostConnectionPool<CL> createHostConnectionPool(Host host, ConnectionPoolImpl<CL> parentPoolImpl) {
            return new SimpleAsyncConnectionPoolImpl<CL>(host, connFactory, cpConfiguration, cpMonitor);
        }
    }

    /**
     * Executes an async operation on a connection chosen by the selection strategy.
     * <p>
     * NOTE(review): unlike executeWithFailover there is no retry loop here, unexpected
     * Throwables are only printed (printStackTrace) and the method then returns null —
     * callers must be prepared for a null future. Confirm this is intentional.
     */
    @Override
    public <R> ListenableFuture<OperationResult<R>> executeAsync(AsyncOperation<CL, R> op) throws DynoException {
        DynoException lastException = null;
        Connection<CL> connection = null;
        long startTime = System.currentTimeMillis();
        try {
            connection = selectionStrategy.getConnection(op, cpConfiguration.getMaxTimeoutWhenExhausted(),
                    TimeUnit.MILLISECONDS);
            ListenableFuture<OperationResult<R>> futureResult = connection.executeAsync(op);
            cpMonitor.incOperationSuccess(connection.getHost(), System.currentTimeMillis() - startTime);
            return futureResult;
        } catch (NoAvailableHostsException e) {
            cpMonitor.incOperationFailure(null, e);
            throw e;
        } catch (DynoException e) {
            lastException = e;
            cpMonitor.incOperationFailure(connection != null ? connection.getHost() : null, e);
            // Track the connection health so that the pool can be purged at a
            // later point
            if (connection != null) {
                cpHealthTracker.trackConnectionError(connection.getParentConnectionPool(), lastException);
            }
        } catch (Throwable t) {
            t.printStackTrace();
        } finally {
            if (connection != null) {
                connection.getParentConnectionPool().returnConnection(connection);
            }
        }
        return null;
    }

    /** @return the token topology maintained by the selection strategy. */
    public TokenPoolTopology getTopology() {
        return selectionStrategy.getTokenPoolTopology();
    }

    /** @return an unmodifiable view of all tokens per rack, keyed by rack name. */
    @Override
    public Map<String, List<TokenPoolTopology.TokenStatus>> getTopologySnapshot() {
        return Collections.unmodifiableMap(selectionStrategy.getTokenPoolTopology().getAllTokens());
    }

    /**
     * @return the token owning the given key, or null when the pool is not using
     *         token-aware load balancing
     */
    @Override
    public Long getTokenForKey(String key) {
        if (cpConfiguration
                .getLoadBalancingStrategy() == ConnectionPoolConfiguration.LoadBalancingStrategy.TokenAware) {
            return selectionStrategy.getTokenForKey(key);
        }
        return null;
    }
}
| 6,029 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/ConnectionPoolConfigurationImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.ErrorRateMonitorConfig;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.RetryPolicy;
import com.netflix.dyno.connectionpool.RetryPolicy.RetryPolicyFactory;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.health.ErrorMonitor.ErrorMonitorFactory;
import com.netflix.dyno.connectionpool.impl.health.SimpleErrorMonitorImpl.SimpleErrorMonitorFactory;
import com.netflix.dyno.connectionpool.impl.utils.ConfigUtils;
import java.util.ArrayList;
import java.util.List;
public class ConnectionPoolConfigurationImpl implements ConnectionPoolConfiguration {
// DEFAULTS
private static final int DEFAULT_MAX_CONNS_PER_HOST = 3;
private static final int DEFAULT_MAX_TIMEOUT_WHEN_EXHAUSTED = 800;
private static final int DEFAULT_MAX_FAILOVER_COUNT = 3;
private static final int DEFAULT_CONNECT_TIMEOUT = 3000;
private static final int DEFAULT_SOCKET_TIMEOUT = 12000;
private static final int DEFAULT_POOL_SHUTDOWN_DELAY = 60000;
private static final int DEFAULT_PING_FREQ_SECONDS = 30;
private static final int DEFAULT_FLUSH_TIMINGS_FREQ_SECONDS = 300;
private static final boolean DEFAULT_LOCAL_RACK_AFFINITY = true;
private static final LoadBalancingStrategy DEFAULT_LB_STRATEGY = LoadBalancingStrategy.TokenAware;
private static final CompressionStrategy DEFAULT_COMPRESSION_STRATEGY = CompressionStrategy.NONE;
private static final String DEFAULT_CONFIG_PUBLISHER_ADDRESS = null;
private static final boolean DEFAULT_FAIL_ON_STARTUP_IFNOHOSTS = true;
private static final int DEFAULT_FAIL_ON_STARTUP_IFNOHOSTS_SECONDS = 60;
private static final int DEFAULT_VALUE_COMPRESSION_THRESHOLD_BYTES = 5 * 1024; // By default, compression is OFF
private static final boolean DEFAULT_IS_DUAL_WRITE_ENABLED = false;
private static final int DEFAULT_DUAL_WRITE_PERCENTAGE = 0;
private static final int DEFAULT_HEALTH_TRACKER_DELAY_MILLIS = 10 * 1000;
private static final int DEFAULT_POOL_RECONNECT_WAIT_MILLIS = 5 * 1000;
private static final boolean DEFAULT_FALLBACK_POLICY = true;
private static final boolean DEFAULT_CONNECT_TO_DATASTORE = false;
private static final int DEFAULT_LOCK_VOTING_SIZE = -1;
private static final String UNSET_CONNECTION_POOL_CONSISTENCY = "UNSET_CONFIG";
private HostSupplier hostSupplier;
private TokenMapSupplier tokenSupplier;
private HostConnectionPoolFactory hostConnectionPoolFactory;
private HashPartitioner hashPartitioner;
private String hashtag;
private final String name;
private int maxConnsPerHost = DEFAULT_MAX_CONNS_PER_HOST;
private int maxTimeoutWhenExhausted = DEFAULT_MAX_TIMEOUT_WHEN_EXHAUSTED;
private int maxFailoverCount = DEFAULT_MAX_FAILOVER_COUNT;
private int connectTimeout = DEFAULT_CONNECT_TIMEOUT;
private int socketTimeout = DEFAULT_SOCKET_TIMEOUT;
private int poolShutdownDelay = DEFAULT_POOL_SHUTDOWN_DELAY;
private int pingFrequencySeconds = DEFAULT_PING_FREQ_SECONDS;
private int flushTimingsFrequencySeconds = DEFAULT_FLUSH_TIMINGS_FREQ_SECONDS;
private boolean localZoneAffinity = DEFAULT_LOCAL_RACK_AFFINITY;
private LoadBalancingStrategy lbStrategy = DEFAULT_LB_STRATEGY;
private String localRack;
private String localDataCenter;
private boolean failOnStartupIfNoHosts = DEFAULT_FAIL_ON_STARTUP_IFNOHOSTS;
private int failOnStarupIfNoHostsSeconds = DEFAULT_FAIL_ON_STARTUP_IFNOHOSTS_SECONDS;
private CompressionStrategy compressionStrategy = DEFAULT_COMPRESSION_STRATEGY;
private int valueCompressionThreshold = DEFAULT_VALUE_COMPRESSION_THRESHOLD_BYTES;
// Dual Write Settings
private boolean isDualWriteEnabled = DEFAULT_IS_DUAL_WRITE_ENABLED;
private String dualWriteClusterName = null;
private int dualWritePercentage = DEFAULT_DUAL_WRITE_PERCENTAGE;
private int healthTrackerDelayMillis = DEFAULT_HEALTH_TRACKER_DELAY_MILLIS;
private int poolReconnectWaitMillis = DEFAULT_POOL_RECONNECT_WAIT_MILLIS;
private int lockVotingSize = DEFAULT_LOCK_VOTING_SIZE;
private boolean fallbackEnabled = DEFAULT_FALLBACK_POLICY;
private boolean connectToDatastore = DEFAULT_CONNECT_TO_DATASTORE;
private String connectionPoolConsistency = UNSET_CONNECTION_POOL_CONSISTENCY;
private RetryPolicyFactory retryFactory = new RetryPolicyFactory() {
@Override
public RetryPolicy getRetryPolicy() {
return new RunOnce();
}
};
private ErrorMonitorFactory errorMonitorFactory = new SimpleErrorMonitorFactory();
/**
 * Creates a configuration with the given pool name and all other settings at their
 * defaults; local rack and data center are resolved from the environment via ConfigUtils.
 *
 * @param name the connection pool name
 */
public ConnectionPoolConfigurationImpl(String name) {
    this.name = name;
    this.localRack = ConfigUtils.getLocalZone();
    this.localDataCenter = ConfigUtils.getDataCenter();
}
/**
* Copy constructor used to construct a new instance of this config with mostly the same values as the given
* config.
*
* @param config
*/
/**
 * Copy constructor used to construct a new instance of this config with mostly the same values as the given
 * config. The new config is named "<original>_shadow".
 * <p>
 * NOTE(review): some fields are NOT copied here — notably hostSupplier,
 * hostConnectionPoolFactory, poolShutdownDelay, flushTimingsFrequencySeconds,
 * failOnStarupIfNoHostsSeconds, fallbackEnabled, connectToDatastore and
 * connectionPoolConsistency remain at their defaults. Presumably intentional for the
 * dual-write shadow cluster (which needs its own host supplier) — confirm before reuse.
 *
 * @param config the configuration to copy settings from
 */
public ConnectionPoolConfigurationImpl(ConnectionPoolConfigurationImpl config) {
    this.name = config.getName() + "_shadow";
    this.compressionStrategy = config.getCompressionStrategy();
    this.valueCompressionThreshold = config.getValueCompressionThreshold();
    this.connectTimeout = config.getConnectTimeout();
    this.failOnStartupIfNoHosts = config.getFailOnStartupIfNoHosts();
    this.lbStrategy = config.getLoadBalancingStrategy();
    this.localDataCenter = config.getLocalDataCenter();
    this.localRack = config.getLocalRack();
    this.localZoneAffinity = config.localZoneAffinity;
    this.maxConnsPerHost = config.getMaxConnsPerHost();
    this.maxFailoverCount = config.getMaxFailoverCount();
    this.maxTimeoutWhenExhausted = config.getMaxTimeoutWhenExhausted();
    this.pingFrequencySeconds = config.getPingFrequencySeconds();
    this.retryFactory = config.getRetryPolicyFactory();
    this.socketTimeout = config.getSocketTimeout();
    this.errorMonitorFactory = config.getErrorMonitorFactory();
    this.tokenSupplier = config.getTokenSupplier();
    this.hashPartitioner = config.getHashPartitioner();
    this.isDualWriteEnabled = config.isDualWriteEnabled();
    this.dualWriteClusterName = config.getDualWriteClusterName();
    this.dualWritePercentage = config.getDualWritePercentage();
    this.hashtag = config.getHashtag();
    this.healthTrackerDelayMillis = config.getHealthTrackerDelayMillis();
    this.poolReconnectWaitMillis = config.getPoolReconnectWaitMillis();
    this.lockVotingSize = config.getLockVotingSize();
}
// ---- Read accessors for ConnectionPoolConfiguration ----

@Override
public boolean isConnectToDatastore() {
    return connectToDatastore;
}
// True once setConnectionPoolConsistency() has replaced the UNSET sentinel value.
@Override
public boolean isConnectionPoolConsistencyProvided() {
    return !(this.connectionPoolConsistency.compareTo(UNSET_CONNECTION_POOL_CONSISTENCY) == 0);
}
@Override
public boolean isFallbackEnabled() {
    return fallbackEnabled;
}
@Override
public int getLockVotingSize() {
    return lockVotingSize;
}
@Override
public String getName() {
    return name;
}
@Override
public int getMaxConnsPerHost() {
    return maxConnsPerHost;
}
@Override
public int getMaxTimeoutWhenExhausted() {
    return maxTimeoutWhenExhausted;
}
@Override
public int getMaxFailoverCount() {
    return maxFailoverCount;
}
@Override
public int getConnectTimeout() {
    return connectTimeout;
}
@Override
public int getSocketTimeout() {
    return socketTimeout;
}
@Override
public RetryPolicyFactory getRetryPolicyFactory() {
    return retryFactory;
}
@Override
public boolean localZoneAffinity() {
    return localZoneAffinity;
}
@Override
public ErrorMonitorFactory getErrorMonitorFactory() {
    return errorMonitorFactory;
}
@Override
public LoadBalancingStrategy getLoadBalancingStrategy() {
    return lbStrategy;
}
@Override
public int getPingFrequencySeconds() {
    return pingFrequencySeconds;
}
@Override
public String getLocalRack() {
    return localRack;
}
@Override
public String getLocalDataCenter() {
    return localDataCenter;
}
@Override
public int getTimingCountersResetFrequencySeconds() {
    return flushTimingsFrequencySeconds;
}
// NOTE(review): always returns null — publisher config appears unsupported here; confirm.
@Override
public String getConfigurationPublisherConfig() {
    return null;
}
@Override
public boolean getFailOnStartupIfNoHosts() {
    return failOnStartupIfNoHosts;
}
@Override
public CompressionStrategy getCompressionStrategy() {
    return compressionStrategy;
}
@Override
public int getValueCompressionThreshold() {
    return valueCompressionThreshold;
}
public int getDefaultFailOnStartupIfNoHostsSeconds() {
    return failOnStarupIfNoHostsSeconds;
}
// ---- Dual-write settings ----
@Override
public boolean isDualWriteEnabled() {
    return isDualWriteEnabled;
}
@Override
public String getDualWriteClusterName() {
    return dualWriteClusterName;
}
@Override
public int getDualWritePercentage() {
    return dualWritePercentage;
}
@Override
public int getHealthTrackerDelayMillis() {
    return healthTrackerDelayMillis;
}
@Override
public int getPoolReconnectWaitMillis() {
    return poolReconnectWaitMillis;
}
/**
 * Diagnostic dump of the effective configuration, one {@code key=value} entry per field.
 * Intended for logging only; the format is not a stable contract.
 */
@Override
public String toString() {
    // Fix: "name" was previously emitted twice (once unquoted, once quoted); emit it once.
    return "ConnectionPoolConfigurationImpl{" +
            "name='" + name + '\'' +
            ", hostSupplier=" + hostSupplier +
            ", tokenSupplier=" + tokenSupplier +
            ", hostConnectionPoolFactory=" + hostConnectionPoolFactory +
            ", maxConnsPerHost=" + maxConnsPerHost +
            ", maxTimeoutWhenExhausted=" + maxTimeoutWhenExhausted +
            ", maxFailoverCount=" + maxFailoverCount +
            ", connectTimeout=" + connectTimeout +
            ", socketTimeout=" + socketTimeout +
            ", poolShutdownDelay=" + poolShutdownDelay +
            ", pingFrequencySeconds=" + pingFrequencySeconds +
            ", flushTimingsFrequencySeconds=" + flushTimingsFrequencySeconds +
            ", localZoneAffinity=" + localZoneAffinity +
            ", lbStrategy=" + lbStrategy +
            ", hashPartitioner=" + hashPartitioner +
            ", localRack='" + localRack + '\'' +
            ", localDataCenter='" + localDataCenter + '\'' +
            ", failOnStartupIfNoHosts=" + failOnStartupIfNoHosts +
            ", failOnStarupIfNoHostsSeconds=" + failOnStarupIfNoHostsSeconds +
            ", compressionStrategy=" + compressionStrategy +
            ", valueCompressionThreshold=" + valueCompressionThreshold +
            ", isDualWriteEnabled=" + isDualWriteEnabled +
            ", dualWriteClusterName='" + dualWriteClusterName + '\'' +
            ", dualWritePercentage=" + dualWritePercentage +
            ", retryFactory=" + retryFactory +
            ", errorMonitorFactory=" + errorMonitorFactory +
            ", hashtag=" + hashtag +
            ", healthTrackerDelayMillis=" + healthTrackerDelayMillis +
            ", poolReconnectWaitMillis=" + poolReconnectWaitMillis +
            ", lockVotingSize=" + lockVotingSize +
            '}';
}
    // ALL SETTERS
    // Unless the return type is void, setters return {@code this} so configuration
    // can be chained fluently.

    public void setConnectToDatastore(boolean connectToDatastore) {
        this.connectToDatastore = connectToDatastore;
    }

    public void setConnectionPoolConsistency(String consistency) {
        this.connectionPoolConsistency = consistency;
    }

    public ConnectionPoolConfigurationImpl setFallbackEnabled(boolean fallbackEnabled) {
        this.fallbackEnabled = fallbackEnabled;
        return this;
    }

    public ConnectionPoolConfigurationImpl setMaxConnsPerHost(int maxConnsPerHost) {
        this.maxConnsPerHost = maxConnsPerHost;
        return this;
    }

    public ConnectionPoolConfigurationImpl setMaxTimeoutWhenExhausted(int maxTimeoutWhenExhausted) {
        this.maxTimeoutWhenExhausted = maxTimeoutWhenExhausted;
        return this;
    }

    public ConnectionPoolConfigurationImpl setMaxFailoverCount(int maxFailoverCount) {
        this.maxFailoverCount = maxFailoverCount;
        return this;
    }

    public ConnectionPoolConfigurationImpl setConnectTimeout(int connectTimeout) {
        this.connectTimeout = connectTimeout;
        return this;
    }

    public ConnectionPoolConfigurationImpl setSocketTimeout(int socketTimeout) {
        this.socketTimeout = socketTimeout;
        return this;
    }

    public ConnectionPoolConfigurationImpl setLoadBalancingStrategy(LoadBalancingStrategy strategy) {
        this.lbStrategy = strategy;
        return this;
    }

    public ConnectionPoolConfigurationImpl setRetryPolicyFactory(RetryPolicyFactory factory) {
        this.retryFactory = factory;
        return this;
    }

    // Delay is in seconds, per the parameter name.
    public ConnectionPoolConfigurationImpl setPoolShutdownDelay(int shutdownDelaySeconds) {
        poolShutdownDelay = shutdownDelaySeconds;
        return this;
    }

    public ConnectionPoolConfigurationImpl setPingFrequencySeconds(int seconds) {
        pingFrequencySeconds = seconds;
        return this;
    }

    public ConnectionPoolConfigurationImpl setLocalZoneAffinity(boolean condition) {
        localZoneAffinity = condition;
        return this;
    }

    public ConnectionPoolConfigurationImpl setFailOnStartupIfNoHosts(boolean condition) {
        this.failOnStartupIfNoHosts = condition;
        return this;
    }

    // Note: the backing field name carries a "Starup" typo; kept as-is.
    public ConnectionPoolConfigurationImpl setFailOnStartupIfNoHostsSeconds(int seconds) {
        this.failOnStarupIfNoHostsSeconds = seconds;
        return this;
    }

    public ConnectionPoolConfigurationImpl setCompressionStrategy(CompressionStrategy compStrategy) {
        this.compressionStrategy = compStrategy;
        return this;
    }

    // Threshold is in bytes, per the parameter name.
    public ConnectionPoolConfigurationImpl setCompressionThreshold(int thresholdInBytes) {
        this.valueCompressionThreshold = thresholdInBytes;
        return this;
    }

    public ConnectionPoolConfigurationImpl setDualWriteEnabled(boolean isDualWriteEnabled) {
        this.isDualWriteEnabled = isDualWriteEnabled;
        return this;
    }

    public ConnectionPoolConfigurationImpl setDualWriteClusterName(String dualWriteClusterName) {
        this.dualWriteClusterName = dualWriteClusterName;
        return this;
    }

    public ConnectionPoolConfigurationImpl setDualWritePercentage(int dualWritePercentage) {
        this.dualWritePercentage = dualWritePercentage;
        return this;
    }
    /** @return the configured consistency level string for this pool. */
    @Override
    public String getConnectionPoolConsistency() {
        return this.connectionPoolConsistency;
    }

    public HostSupplier getHostSupplier() {
        return hostSupplier;
    }

    // The with* methods below are fluent builders, same as the set* family;
    // they return {@code this} for chaining.
    public ConnectionPoolConfigurationImpl withHostSupplier(HostSupplier hSupplier) {
        hostSupplier = hSupplier;
        return this;
    }

    public TokenMapSupplier getTokenSupplier() {
        return tokenSupplier;
    }

    public String getHashtag() {
        return hashtag;
    }

    public ConnectionPoolConfigurationImpl withTokenSupplier(TokenMapSupplier tSupplier) {
        tokenSupplier = tSupplier;
        return this;
    }

    public HashPartitioner getHashPartitioner() {
        return hashPartitioner;
    }

    public ConnectionPoolConfigurationImpl withHashPartitioner(HashPartitioner hPartitioner) {
        hashPartitioner = hPartitioner;
        return this;
    }

    public ConnectionPoolConfigurationImpl withHashtag(String htag) {
        hashtag = htag;
        return this;
    }

    public ConnectionPoolConfigurationImpl withErrorMonitorFactory(ErrorMonitorFactory factory) {
        errorMonitorFactory = factory;
        return this;
    }

    public ConnectionPoolConfigurationImpl withHostConnectionPoolFactory(HostConnectionPoolFactory factory) {
        hostConnectionPoolFactory = factory;
        return this;
    }

    // NOTE(review): method name says "Mills" (sic) but sets a millis field; public
    // API, so the name is kept.
    public ConnectionPoolConfigurationImpl withHealthTrackerDelayMills(int millis) {
        healthTrackerDelayMillis = millis;
        return this;
    }

    public ConnectionPoolConfigurationImpl withPoolReconnectWaitMillis(int millis) {
        poolReconnectWaitMillis = millis;
        return this;
    }
    /**
     * Default {@link ErrorRateMonitorConfig}: a sliding error-rate window with a
     * configurable check frequency and alert-suppression window, plus a list of
     * {@link ErrorThreshold}s evaluated against it.
     */
    public static class ErrorRateMonitorConfigImpl implements ErrorRateMonitorConfig {

        // Window size (seconds) over which error rates are evaluated.
        int window = 20;
        // How often (seconds) the window is checked.
        int checkFrequency = 1;
        // Suppression window (seconds) after a check fires.
        int suppressWindow = 90;

        // Internal, mutable list; note getThresholds() hands it out directly.
        private List<ErrorThreshold> thresholds = new ArrayList<ErrorThreshold>();

        /** Default config: one threshold of 10 errors/sec over 10s with 80% coverage. */
        public ErrorRateMonitorConfigImpl() {
            this.addThreshold(10, 10, 80);
        }

        // NOTE(review): unlike the no-arg constructor, this one installs NO default
        // threshold — callers must invoke addThreshold() themselves. Confirm intentional.
        public ErrorRateMonitorConfigImpl(int w, int f, int s) {
            this.window = w;
            this.checkFrequency = f;
            this.suppressWindow = s;
        }

        @Override
        public int getWindowSizeSeconds() {
            return window;
        }

        @Override
        public int getCheckFrequencySeconds() {
            return checkFrequency;
        }

        @Override
        public int getCheckSuppressWindowSeconds() {
            return suppressWindow;
        }

        // NOTE(review): exposes the internal mutable list; callers could mutate it.
        @Override
        public List<ErrorThreshold> getThresholds() {
            return thresholds;
        }

        /**
         * Appends a threshold: {@code bucketThreshold} errors/sec sustained over
         * {@code bucketWindow} seconds for at least {@code bucketCoverage}% of the window.
         */
        public void addThreshold(final int bucketThreshold, final int bucketWindow, final int bucketCoverage) {
            thresholds.add(new ErrorThreshold() {

                @Override
                public int getThresholdPerSecond() {
                    return bucketThreshold;
                }

                @Override
                public int getWindowSeconds() {
                    return bucketWindow;
                }

                @Override
                public int getWindowCoveragePercentage() {
                    return bucketCoverage;
                }

            });
        }
    }
    /** Sets the rack considered local to this client; returns {@code this} for chaining. */
    public ConnectionPoolConfigurationImpl setLocalRack(String rack) {
        this.localRack = rack;
        return this;
    }

    /** Sets the data center considered local to this client; returns {@code this} for chaining. */
    public ConnectionPoolConfigurationImpl setLocalDataCenter(String dc) {
        this.localDataCenter = dc;
        return this;
    }
}
| 6,030 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/MonitorConsoleMBean.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl;
import java.util.Collection;
import java.util.List;
import java.util.Map;
public interface MonitorConsoleMBean {

    /**
     * Note that monitor names == connection pool names
     *
     * @return A comma separated string of all registered monitors
     */
    String getMonitorNames();

    /**
     * @param cpName connection pool (monitor) name
     * @return a human-readable multi-line stats dump for the named monitor,
     *         or a "NOT FOUND" message when no such monitor is registered
     */
    String getMonitorStats(String cpName);

    /**
     * @param cpName connection pool name
     * @return snapshot keyed rack -> token -> [host address, "UP"/"DOWN"];
     *         empty when the pool has no topology
     */
    Map<String, Map<String, List<String>>> getTopologySnapshot(String cpName);

    /**
     * @param cpName connection pool name
     * @return key/value view of the pool's runtime configuration, or null when
     *         the pool is unknown
     */
    Map<String, String> getRuntimeConfiguration(String cpName);
}
| 6,031 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/RunOnce.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.dyno.connectionpool.RetryPolicy;
/**
 * {@link RetryPolicy} that permits exactly one attempt — i.e. no retries.
 * The first recorded success or failure consumes the single allowed attempt.
 *
 * @author poberai
 */
public class RunOnce implements RetryPolicy {

    private final AtomicInteger attempts = new AtomicInteger(0);

    @Override
    public void begin() {
        // Nothing to set up for a single-attempt policy.
    }

    @Override
    public void success() {
        attempts.incrementAndGet();
    }

    @Override
    public void failure(Exception e) {
        attempts.incrementAndGet();
    }

    /** A retry is allowed only while no attempt has been recorded yet. */
    @Override
    public boolean allowRetry() {
        return attempts.get() == 0;
    }

    /** Reports at most one attempt, no matter how many were recorded. */
    @Override
    public int getAttemptCount() {
        if (attempts.get() > 0) {
            return 1;
        }
        return 0;
    }

    /** The single attempt never falls back to another zone. */
    @Override
    public boolean allowCrossZoneFallback() {
        return false;
    }

    @Override
    public String toString() {
        return "RunOnce";
    }

    /** Factory handing out a fresh single-attempt policy per operation. */
    public static class RetryFactory implements RetryPolicyFactory {

        @Override
        public RetryPolicy getRetryPolicy() {
            return new RunOnce();
        }
    }
}
| 6,032 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/MonitorConsole.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
/**
* Console that gives the admin insight into the current status of the Dyno {@link ConnectionPool}
*
* @author poberai
*/
public class MonitorConsole implements MonitorConsoleMBean {

    // Process-wide singleton; OBJECT_NAME is the JMX name used by registration
    // code elsewhere (not visible here) — TODO confirm against the registrar.
    private static final MonitorConsole Instance = new MonitorConsole();

    /*package*/ static final String OBJECT_NAME = "com.netflix.dyno.connectionpool.impl:type=MonitorConsole";

    public static MonitorConsole getInstance() {
        return Instance;
    }

    // Monitors and pools registered under the same key (the pool name); see
    // registerConnectionPool for how duplicate names are disambiguated.
    private final ConcurrentHashMap<String, ConnectionPoolMonitor> cpMonitors = new ConcurrentHashMap<String, ConnectionPoolMonitor>();
    private final ConcurrentHashMap<String, ConnectionPoolImpl<?>> connectionPools = new ConcurrentHashMap<String, ConnectionPoolImpl<?>>();

    private MonitorConsole() {
    }

    @Override
    public String getMonitorNames() {
        return cpMonitors.keySet().toString();
    }

    public void addMonitorConsole(String name, ConnectionPoolMonitor monitor) {
        cpMonitors.put(name, monitor);
    }

    /**
     * Registers a pool (and its monitor) under the pool's name. If a pool with
     * that name already exists, a timestamp is appended to keep the key unique.
     */
    public void registerConnectionPool(ConnectionPoolImpl<?> cp) {
        ConnectionPoolImpl<?> cpImpl = connectionPools.putIfAbsent(cp.getName(), cp);
        if (cpImpl != null) {
            // Need a unique id, so append a timestamp
            String name = cp.getName() + System.currentTimeMillis();
            connectionPools.put(name, cp);
            addMonitorConsole(name, cp.getMonitor());
        } else {
            addMonitorConsole(cp.getName(), cp.getMonitor());
        }
    }

    /**
     * Builds a human-readable, multi-line dump of pool-level counters followed by
     * per-host counters. Returns a "NOT FOUND" message for unknown monitors.
     */
    @Override
    public String getMonitorStats(String name) {

        ConnectionPoolMonitor cpMonitor = cpMonitors.get(name);
        if (cpMonitor == null) {
            return name + " NOT FOUND";
        }

        StringBuilder sb = new StringBuilder();

        sb
                .append("ConnectionPoolMonitor(")
                .append("\nConnections[")
                .append(" created: ").append(cpMonitor.getConnectionCreatedCount())
                .append(", closed: ").append(cpMonitor.getConnectionClosedCount())
                .append(", recycled: ").append(cpMonitor.getConnectionRecycledCount())
                .append(", createFailed: ").append(cpMonitor.getConnectionCreateFailedCount())
                .append(", borrowed: ").append(cpMonitor.getConnectionBorrowedCount())
                .append(", returned: ").append(cpMonitor.getConnectionReturnedCount())
                .append(", borrowedLatMean: ").append(cpMonitor.getConnectionBorrowedLatMean())
                .append(", borrowedLatP99: ").append(cpMonitor.getConnectionBorrowedLatP99())

                .append("]\nOperations[")
                .append(" success=").append(cpMonitor.getOperationSuccessCount())
                .append(", failure=").append(cpMonitor.getOperationFailureCount())
                .append(", failover=").append(cpMonitor.getFailoverCount())

                .append("]\nHosts[")
                .append(" add=").append(cpMonitor.getHostUpCount())
                .append(", down=").append(cpMonitor.getHostDownCount())
                .append("])");

        Map<Host, HostConnectionStats> hostStats = cpMonitor.getHostStats();
        for (Host host : hostStats.keySet()) {

            // Skip the synthetic aggregate entry.
            if (host.getHostAddress().contains("AllHosts")) {
                continue;
            }

            HostConnectionStats hStats = hostStats.get(host);
            sb.append("\nHost: " + host.getHostAddress() + ":" + host.getPort() + ":" + host.getRack() + "\t");
            sb.append(" borrowed: " + hStats.getConnectionsBorrowed());
            sb.append(" returned: " + hStats.getConnectionsReturned());
            sb.append(" created: " + hStats.getConnectionsCreated());
            sb.append(" closed: " + hStats.getConnectionsClosed());
            sb.append(" createFailed: " + hStats.getConnectionsCreateFailed());
            sb.append(" errors: " + hStats.getOperationErrorCount());
            sb.append(" success: " + hStats.getOperationSuccessCount());
        }
        sb.append("\n");

        return sb.toString();
    }

    /** @return the token topology for the named pool, or null when unknown. */
    public TokenPoolTopology getTopology(String cpName) {
        ConnectionPoolImpl<?> pool = connectionPools.get(cpName);
        return (pool != null) ? pool.getTopology() : null;
    }

    /**
     * Snapshot keyed rack -> token -> [host address, "UP"/"DOWN"].
     * Empty (never null) when the pool has no topology.
     */
    @Override
    public Map<String, Map<String, List<String>>> getTopologySnapshot(String cpName) {

        Map<String, Map<String, List<String>>> snapshot =
                new HashMap<String, Map<String, List<String>>>();

        TokenPoolTopology topology = getTopology(cpName);

        if (topology == null) {
            return snapshot;
        }

        Map<String, List<TokenPoolTopology.TokenStatus>> map = topology.getAllTokens();
        for (String rack : map.keySet()) {
            List<TokenPoolTopology.TokenStatus> tokens = map.get(rack);
            snapshot.put(rack, getTokenStatusMap(tokens));
        }

        return snapshot;
    }

    /**
     * Key/value view of the pool's effective configuration.
     * Returns null (not an empty map) when the pool is unknown or unconfigured.
     */
    @Override
    public Map<String, String> getRuntimeConfiguration(String cpName) {
        ConnectionPoolImpl<?> pool = connectionPools.get(cpName);
        if (pool != null && pool.getConfiguration() != null) {
            final ConnectionPoolConfiguration cpConfig = pool.getConfiguration();

            // Retain order for easy diffing across requests/nodes
            final Map<String, String> config = new LinkedHashMap<>();

            // Rather than use reflection to iterate and find getters, simply provide the base configuration
            config.put("localRack", cpConfig.getLocalRack());
            config.put("compressionStrategy", cpConfig.getCompressionStrategy().name());
            config.put("compressionThreshold", String.valueOf(cpConfig.getValueCompressionThreshold()));
            config.put("connectTimeout", String.valueOf(cpConfig.getConnectTimeout()));
            config.put("failOnStartupIfNoHosts", String.valueOf(cpConfig.getFailOnStartupIfNoHosts()));
            config.put("hostSupplier", cpConfig.getHostSupplier().toString());
            config.put("loadBalancingStrategy", cpConfig.getLoadBalancingStrategy().name());
            config.put("maxConnsPerHost", String.valueOf(cpConfig.getMaxConnsPerHost()));
            config.put("socketTimeout", String.valueOf(cpConfig.getSocketTimeout()));
            config.put("timingCountersResetFrequencyInSecs",
                    String.valueOf(cpConfig.getTimingCountersResetFrequencySeconds()));
            config.put("replicationFactor", String.valueOf(pool.getTopology().getReplicationFactor()));
            config.put("retryPolicy", pool.getConfiguration().getRetryPolicyFactory().getRetryPolicy().toString());
            config.put("localRackAffinity", String.valueOf(pool.getConfiguration().localZoneAffinity()));
            return Collections.unmodifiableMap(config);
        }

        return null;
    }

    // Maps token -> [host address, "UP"/"DOWN"] for one rack's token list.
    private Map<String, List<String>> getTokenStatusMap(List<TokenPoolTopology.TokenStatus> tokens) {
        Map<String, List<String>> map = new HashMap<String, List<String>>();
        for (TokenPoolTopology.TokenStatus tokenStatus : tokens) {
            String token = tokenStatus.getToken().toString();
            HostConnectionPool<?> hostPool = tokenStatus.getHostPool();
            List<String> meta = CollectionUtils.newArrayList(
                    hostPool.getHost().getHostAddress(),
                    hostPool.isActive() ? "UP" : "DOWN"
            );
            map.put(token, meta);
        }
        return map;
    }
}
| 6,033 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/SimpleAsyncConnectionPoolImpl.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionFactory;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.impl.lb.CircularList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * {@link HostConnectionPool} that keeps a fixed set of persistent connections to
 * one host and hands them out round-robin. Connections are shared: borrow does
 * NOT remove a connection from the pool, and return while active is a no-op.
 */
public class SimpleAsyncConnectionPoolImpl<CL> implements HostConnectionPool<CL> {

    private static final Logger Logger = LoggerFactory.getLogger(SimpleAsyncConnectionPoolImpl.class);

    private final Host host;
    private final ConnectionFactory<CL> connFactory;
    private final ConnectionPoolConfiguration cpConfig;
    private final ConnectionPoolMonitor cpMonitor;

    // state to track the connections being used
    private final CircularList<Connection<CL>> rrSelector = new CircularList<Connection<CL>>(new ArrayList<Connection<CL>>());
    private final ConcurrentHashMap<Connection<CL>, Connection<CL>> connMap = new ConcurrentHashMap<Connection<CL>, Connection<CL>>();

    // Tracking state of host connection pool.
    private final AtomicBoolean active = new AtomicBoolean(false);
    private final AtomicBoolean reconnecting = new AtomicBoolean(false);

    public SimpleAsyncConnectionPoolImpl(Host host, ConnectionFactory<CL> cFactory,
                                         ConnectionPoolConfiguration config, ConnectionPoolMonitor monitor) {

        this.host = host;
        this.connFactory = cFactory;
        this.cpConfig = config;
        this.cpMonitor = monitor;
    }

    /**
     * Picks the next connection round-robin. The duration/unit arguments are
     * ignored — there is no waiting, since connections are shared, not checked out.
     */
    @Override
    public Connection<CL> borrowConnection(int duration, TimeUnit unit) throws DynoException {

        if (!active.get()) {
            throw new DynoConnectException("Cannot connect to pool when pool is shutdown for host: " + host);
        }

        long start = System.currentTimeMillis();
        Connection<CL> connection = rrSelector.getNextElement();
        if (connection == null) {
            throw new DynoConnectException("Cannot find connection for host: " + host);
        }
        cpMonitor.incConnectionBorrowed(host, System.currentTimeMillis() - start);
        return connection;
    }

    /**
     * No-op while the pool is active (connections stay in the shared list);
     * closes the connection once the pool has been shut down.
     */
    @Override
    public boolean returnConnection(Connection<CL> connection) {
        try {
            if (!active.get()) {
                // Just close the connection
                return closeConnection(connection);
            } else {
                // do nothing here
                return false;
            }
        } finally {
            cpMonitor.incConnectionReturned(host);
        }
    }

    // Removes the connection from tracking (if present), closes it and records the
    // close. Always reports success unless close() itself throws.
    @Override
    public boolean closeConnection(Connection<CL> connection) {
        try {
            Connection<CL> prevConnection = connMap.remove(connection);
            if (prevConnection != null) {
                connection.close();
                rrSelector.removeElement(connection);
                cpMonitor.incConnectionClosed(host, connection.getLastException());
            }
            return true;
        } catch (Exception e) {
            Logger.error("Failed to close connection for host: " + host, e);
            return false;
        } finally {
            // NOTE(review): empty finally block — dead code, candidate for removal.
        }
    }

    /** Closes the given connection and replaces it with a freshly created one. */
    @Override
    public void recycleConnection(Connection<CL> connection) {
        this.closeConnection(connection);
        cpMonitor.incConnectionReturned(host);
        createConnection();
        cpMonitor.incConnectionRecycled(host);
    }

    @Override
    public void markAsDown(DynoException reason) {

        if (!active.get()) {
            return; // already marked as down
        }

        active.compareAndSet(true, false);
    }

    /**
     * Tears down and re-primes the pool. Guarded by the {@code reconnecting} flag
     * so only one caller performs the reconnect at a time.
     */
    @Override
    public void reconnect() {

        if (active.get()) {
            Logger.info("Pool already active, ignoring reconnect connections request");
            return;
        }

        // NOTE(review): this plain get() is redundant — the compareAndSet below
        // already covers it; kept for the extra (cheap) early-out log message.
        if (reconnecting.get()) {
            Logger.info("Pool already reconnecting, ignoring reconnect connections request");
            return;
        }

        if (!(reconnecting.compareAndSet(false, true))) {
            Logger.info("Pool already reconnecting, ignoring reconnect connections request");
            return;
        }

        try {
            shutdown();
            primeConnections();
        } finally {
            reconnecting.set(false);
        }
    }

    /** Marks the pool inactive and closes every tracked connection. */
    @Override
    public void shutdown() {

        Logger.info("Shutting down connection pool for host:" + host);
        active.set(false);

        for (Connection<CL> connection : connMap.keySet()) {
            closeConnection(connection);
        }

        connMap.clear();
    }

    /**
     * Creates maxConnsPerHost connections and activates the pool. Throws (leaving
     * the pool inactive) on the first connection that fails to open.
     */
    @Override
    public int primeConnections() throws DynoException {

        Logger.info("Priming connection pool for host:" + host);

        if (active.get()) {
            throw new DynoException("Connection pool has already been inited, cannot prime connections for host:" + host);
        }

        int created = 0;
        for (int i = 0; i < cpConfig.getMaxConnsPerHost(); i++) {
            try {
                createConnection();
                created++;
            } catch (DynoConnectException e) {
                Logger.error("Failed to create connection", e);
                cpMonitor.incConnectionCreateFailed(host, e);
                // NOTE(review): connections created before the failure remain in
                // connMap while active stays false — confirm a later shutdown()/
                // reconnect() always cleans them up.
                throw e;
            }
        }
        active.compareAndSet(false, true);

        return created;
    }

    @Override
    public Collection<Connection<CL>> getAllConnections() {
        return connMap.keySet();
    }

    @Override
    public int getConnectionTimeout() {
        return cpConfig.getConnectTimeout();
    }

    @Override
    public int getSocketTimeout() {
        return cpConfig.getSocketTimeout();
    }

    // NOTE(review): the connection is registered in connMap BEFORE open(); if
    // open() throws, the unopened connection stays in the map until shutdown.
    private Connection<CL> createConnection() throws DynoException {

        Connection<CL> connection = connFactory.createConnection(this);
        connMap.put(connection, connection);
        connection.open();
        rrSelector.addElement(connection);

        cpMonitor.incConnectionCreated(host);
        return connection;
    }

    @Override
    public Host getHost() {
        return host;
    }

    @Override
    public boolean isActive() {
        return active.get();
    }

    @Override
    public boolean isShutdown() {
        return !active.get();
    }

    /** @return the number of connections currently in the round-robin list. */
    @Override
    public int size() {
        return rrSelector.getSize();
    }
}
| 6,034 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/MonitorConsoleResource.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.json.simple.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.TokenPoolTopology;
import com.netflix.dyno.connectionpool.TokenPoolTopology.TokenStatus;
/**
 * JAX-RS façade over {@link MonitorConsole}: exposes monitor stats, runtime
 * configuration and token topology for each registered connection pool.
 */
@Path("/dyno/console")
public class MonitorConsoleResource {

    private static final Logger Logger = LoggerFactory.getLogger(MonitorConsoleResource.class);

    public MonitorConsoleResource() {
        Logger.info("LOADED MonitorConsoleResource");
    }

    /** Lists the names of all registered monitors (== connection pool names). */
    @Path("/monitors")
    @GET
    @Consumes(MediaType.TEXT_PLAIN)
    @Produces(MediaType.TEXT_PLAIN)
    public String getMonitorNames() {
        return MonitorConsole.getInstance().getMonitorNames();
    }

    /** Plain-text stats dump for one monitor. */
    @Path("/monitor/{monitorName}")
    @GET
    @Consumes(MediaType.TEXT_PLAIN)
    @Produces(MediaType.TEXT_PLAIN)
    public String getMonitorStats(@PathParam("monitorName") String monitorName) {
        return MonitorConsole.getInstance().getMonitorStats(monitorName);
    }

    /** Runtime configuration of one pool as a JSON object (empty when unknown). */
    @SuppressWarnings("unchecked")
    @Path("/monitor/{cpName}/configuration")
    @GET
    @Consumes(MediaType.TEXT_PLAIN)
    @Produces(MediaType.APPLICATION_JSON)
    public String getConnectionPoolConfiguration(@PathParam("cpName") String cpName) {
        Map<String, String> config = MonitorConsole.getInstance().getRuntimeConfiguration(cpName);
        JSONObject json = new JSONObject();
        if (config != null) {
            for (Map.Entry<String, String> entry : config.entrySet()) {
                json.put(entry.getKey(), entry.getValue());
            }
        }
        return json.toJSONString();
    }

    /** Same listing as /monitors; kept as a separate path for topology browsing. */
    @Path("/topologies")
    @GET
    @Consumes(MediaType.TEXT_PLAIN)
    @Produces(MediaType.TEXT_PLAIN)
    public String getConnectionPoolNames() {
        return MonitorConsole.getInstance().getMonitorNames();
    }

    // Note: the public method name ("Toplogy") is a historical typo; it is kept
    // since the JAX-RS path, not the method name, defines the endpoint.
    @SuppressWarnings("unchecked")
    @Path("/topology/{cpName}")
    @GET
    @Consumes(MediaType.TEXT_PLAIN)
    @Produces(MediaType.APPLICATION_JSON)
    public String getConnectionPoolToplogy(@PathParam("cpName") String cpName) {
        TokenPoolTopology topology = MonitorConsole.getInstance().getTopology(cpName);
        if (topology == null) {
            return "Not Found: " + cpName;
        }

        JSONObject json = new JSONObject();
        for (Map.Entry<String, List<TokenStatus>> entry : topology.getAllTokens().entrySet()) {
            json.put(entry.getKey(), getTokenStatusMap(entry.getValue()));
        }
        return json.toJSONString();
    }

    // token -> "<hostAddress>__UP|DOWN" for one rack's token list.
    private Map<String, String> getTokenStatusMap(List<TokenStatus> tokens) {
        Map<String, String> statusByToken = new HashMap<String, String>();
        for (TokenStatus tokenStatus : tokens) {
            HostConnectionPool<?> hostPool = tokenStatus.getHostPool();
            String state = hostPool.isActive() ? "UP" : "DOWN";
            statusByToken.put(tokenStatus.getToken().toString(),
                    hostPool.getHost().getHostAddress() + "__" + state);
        }
        return statusByToken;
    }
}
| 6,035 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/OperationResultImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.OperationMonitor;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
 * {@link OperationResult} implementation that records the outcome of a single
 * operation together with bookkeeping: latency, attempt count, the host that
 * executed it, and arbitrary string metadata. The result may be an immediate
 * value or a {@link Future} that is resolved lazily in {@link #getResult()}.
 *
 * @param <R> result type
 * @author poberai
 */
public class OperationResultImpl<R> implements OperationResult<R> {

    private final String opName;
    private final R result;
    private final Future<R> futureResult;
    private Host host = null;
    private long duration = 0;
    private int attempts = 0;
    private final OperationMonitor opMonitor;
    private final ConcurrentHashMap<String, String> metadata = new ConcurrentHashMap<String, String>();

    /** Wraps an already-computed (synchronous) result. */
    public OperationResultImpl(String name, R r, OperationMonitor monitor) {
        this.opName = name;
        this.result = r;
        this.futureResult = null;
        this.opMonitor = monitor;
    }

    /** Wraps a pending (asynchronous) result. */
    public OperationResultImpl(String name, Future<R> future, OperationMonitor monitor) {
        this.opName = name;
        this.result = null;
        this.futureResult = future;
        this.opMonitor = monitor;
    }

    @Override
    public Host getNode() {
        return host;
    }

    /**
     * Returns the synchronous result, or blocks on the future in the async case.
     * Any failure while waiting is rethrown wrapped in a {@link DynoException}.
     */
    @Override
    public R getResult() {
        if (futureResult == null) {
            return result;
        }
        try {
            return futureResult.get();
        } catch (Exception e) {
            throw new DynoException(e);
        }
    }

    /** @return recorded latency in milliseconds. */
    @Override
    public long getLatency() {
        return duration;
    }

    @Override
    public long getLatency(TimeUnit units) {
        return units.convert(duration, TimeUnit.MILLISECONDS);
    }

    @Override
    public int getAttemptsCount() {
        return attempts;
    }

    @Override
    public OperationResultImpl<R> setAttemptsCount(int count) {
        this.attempts = count;
        return this;
    }

    public OperationResultImpl<R> setNode(Host h) {
        this.host = h;
        return this;
    }

    public OperationResultImpl<R> attempts(int count) {
        this.attempts = count;
        return this;
    }

    /** Records latency in milliseconds and forwards it to the monitor, if any. */
    public OperationResultImpl<R> latency(long time) {
        this.duration = time;
        if (opMonitor != null) {
            opMonitor.recordLatency(opName, time, TimeUnit.MILLISECONDS);
        }
        return this;
    }

    /** Records latency given in an arbitrary unit (stored internally as millis). */
    @Override
    public OperationResultImpl<R> setLatency(long time, TimeUnit unit) {
        this.duration = TimeUnit.MILLISECONDS.convert(time, unit);
        if (opMonitor != null) {
            opMonitor.recordLatency(opName, time, unit);
        }
        return this;
    }

    /** @return the live (mutable, thread-safe) metadata map. */
    @Override
    public Map<String, String> getMetadata() {
        return metadata;
    }

    @Override
    public OperationResultImpl<R> addMetadata(String key, String value) {
        metadata.put(key, value);
        return this;
    }

    /** Copies every entry, stringifying values via {@code toString()}. */
    @Override
    public OperationResultImpl<R> addMetadata(Map<String, Object> map) {
        for (Map.Entry<String, Object> entry : map.entrySet()) {
            metadata.put(entry.getKey(), entry.getValue().toString());
        }
        return this;
    }
}
| 6,036 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/RetryNTimes.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.dyno.connectionpool.RetryPolicy;
/**
* Simple implementation of {@link RetryPolicy} that ensures an operation can be re tried at most N times.
* <p>
* Note that RetryNTimes (2) means that a total of 2 + 1 = 3 attempts will be allowed before giving up.
*
* @author poberai
*/
public class RetryNTimes implements RetryPolicy {

    /** Max number of retries after the first attempt: n retries => n + 1 total attempts. */
    private final int n;

    // Immutable snapshot of (attempt count, last attempt succeeded), swapped atomically via CAS.
    private final AtomicReference<RetryState> state = new AtomicReference<>(new RetryState(0, false));

    private final boolean allowCrossZoneFallback;

    public RetryNTimes(int n, boolean allowFallback) {
        this.n = n;
        this.allowCrossZoneFallback = allowFallback;
    }

    @Override
    public void begin() {
        // No per-operation setup needed; state starts as (count=0, success=false).
    }

    @Override
    public void success() {
        // CAS loop: increment the attempt count and mark the last attempt successful.
        boolean success = false;
        RetryState rs;
        while (!success) {
            rs = state.get();
            success = state.compareAndSet(rs, new RetryState(rs.count + 1, true));
        }
    }

    @Override
    public void failure(Exception e) {
        // CAS loop: increment the attempt count and mark the last attempt failed.
        boolean success = false;
        RetryState rs;
        while (!success) {
            rs = state.get();
            success = state.compareAndSet(rs, new RetryState(rs.count + 1, false));
        }
    }

    @Override
    public boolean allowRetry() {
        // Allow another attempt only while the last attempt failed and we have
        // not exceeded n recorded attempts (count <= n permits n + 1 tries total).
        final RetryState rs = state.get();
        return !rs.success && rs.count <= n;
    }

    @Override
    public int getAttemptCount() {
        return state.get().count;
    }

    @Override
    public boolean allowCrossZoneFallback() {
        return allowCrossZoneFallback;
    }

    @Override
    public String toString() {
        return "RetryNTimes{" +
                "n=" + n +
                ", state=" + state.get() +
                ", allowCrossZoneFallback=" + allowCrossZoneFallback +
                '}';
    }

    /** Factory that stamps out identically-configured {@link RetryNTimes} policies. */
    public static class RetryFactory implements RetryPolicyFactory {

        final int n;
        final boolean allowCrossZoneFallback;

        public RetryFactory(int n) {
            this(n, true);
        }

        public RetryFactory(int n, boolean allowFallback) {
            this.n = n;
            this.allowCrossZoneFallback = allowFallback;
        }

        @Override
        public RetryPolicy getRetryPolicy() {
            return new RetryNTimes(n, allowCrossZoneFallback);
        }

        @Override
        public String toString() {
            return "RetryFactory{" +
                    "n=" + n +
                    ", allowCrossZoneFallback=" + allowCrossZoneFallback +
                    '}';
        }
    }

    /**
     * Immutable (count, success) pair used as the CAS unit of state.
     * Declared static: it uses no enclosing-instance state, so the implicit
     * outer-class reference of a non-static inner class was pure overhead.
     */
    private static class RetryState {

        private final int count;
        private final boolean success;

        public RetryState(final int count, final boolean success) {
            this.count = count;
            this.success = success;
        }

        @Override
        public String toString() {
            return "RetryState{" +
                    "count=" + count +
                    ", success=" + success +
                    '}';
        }
    }
}
| 6,037 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/HostConnectionPoolFactory.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
public interface HostConnectionPoolFactory<CL> {

    /**
     * Creates a {@link HostConnectionPool} for the given host, parented to the
     * supplied {@link ConnectionPoolImpl}.
     *
     * @param host           host the new pool will manage connections for
     * @param parentPoolImpl owning connection pool implementation
     * @return a new host-level connection pool
     */
    HostConnectionPool<CL> createHostConnectionPool(Host host, ConnectionPoolImpl<CL> parentPoolImpl);

    /** Flavor of host connection pool a factory produces. */
    enum Type {
        /** Asynchronous, non-blocking instance */
        Async,
        /** Synchronous, blocking instance */
        Sync;
    }
}
| 6,038 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/HostStatusTracker.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.Host.Status;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.HostSupplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class that can be used in conjunction with a {@link HostSupplier} repeatedly to understand whether the change within the
* active and inactive host set.
* <p>
* Implementations of {@link ConnectionPool} can then use this utility to adapt to topology changes and hence manage the corresponding
* {@link HostConnectionPool} objects for the set of active hosts.
* <p>
* Note the behavior of this class is such that if a host <em>disappears</em> and it was last known as <em>active</em>
* then it will be moved to <em>inactive</em>. If however it disappears and its last known state was <em>inactive</em>
* then it is removed from tracking altogether. This is to support terminations/node replacements.
*
* @author poberai
*/
public class HostStatusTracker {

    private static final Logger logger = LoggerFactory.getLogger(HostStatusTracker.class);

    // the set of active and inactive hosts
    private final Set<Host> activeHosts = new HashSet<Host>();
    private final Set<Host> inactiveHosts = new HashSet<Host>();

    public HostStatusTracker() {
    }

    public HostStatusTracker(Collection<Host> up, Collection<Host> down) {
        verifyMutuallyExclusive(up, down);
        activeHosts.addAll(up);
        inactiveHosts.addAll(down);
    }

    /**
     * Helper method to check that there is no overlap b/w hosts up and down.
     *
     * @param A hosts reported up
     * @param B hosts reported down
     * @throws RuntimeException if any host appears in both collections
     */
    private void verifyMutuallyExclusive(Collection<Host> A, Collection<Host> B) {
        Set<Host> left = new HashSet<Host>(A);
        Set<Host> right = new HashSet<Host>(B);
        // removeAll returns true iff the intersection was non-empty
        boolean modified = left.removeAll(right);
        if (modified) {
            throw new RuntimeException("Host up and down sets are not mutually exclusive!");
        }
    }

    /**
     * All we need to check here is whether the new active set is not exactly the same as the
     * prev active set. If any new hosts have been added or any hosts are missing
     * then return 'true' indicating that the active set has changed.
     *
     * @param hostsUp hosts currently reported up
     * @return true/false indicating whether the active set has changed from the previous set.
     */
    public boolean activeSetChanged(Collection<Host> hostsUp) {
        return !hostsUp.equals(activeHosts);
    }

    /**
     * This check is more involved than the active set check. Here we have 2 conditions to check for:
     * <p>
     * 1. New hosts that were in the active set have shown up in the inactive set.
     * 2. Hosts from the active set have disappeared and are also not in the provided inactive set.
     * This is where we have simply forgotten about some active host and it needs to be shut down.
     *
     * @param hostsUp   hosts currently reported up
     * @param hostsDown hosts currently reported down
     * @return true/false indicating whether we have a host that has been shutdown
     */
    public boolean inactiveSetChanged(Collection<Host> hostsUp, Collection<Host> hostsDown) {
        // Condition 1: a host we considered active is now reported down.
        boolean newInactiveHostsFound = false;
        for (Host hostDown : hostsDown) {
            if (activeHosts.contains(hostDown)) {
                newInactiveHostsFound = true;
                break;
            }
        }

        // Condition 2: a previously active host has vanished (absent from hostsUp).
        Set<Host> prevActiveHosts = new HashSet<Host>(activeHosts);
        prevActiveHosts.removeAll(hostsUp);

        // BUG FIX: the old code unconditionally reassigned newInactiveHostsFound
        // here, discarding the condition-1 result. Combine both conditions instead.
        return newInactiveHostsFound || !prevActiveHosts.isEmpty();
    }

    /**
     * Helper method that checks if anything has changed b/w the current state and the new set of hosts up and down.
     *
     * @param hostsUp   hosts currently reported up
     * @param hostsDown hosts currently reported down
     * @return true/false indicating whether the set of hosts has changed or not.
     */
    public boolean checkIfChanged(Collection<Host> hostsUp, Collection<Host> hostsDown) {
        boolean changed = activeSetChanged(hostsUp) || inactiveSetChanged(hostsUp, hostsDown);
        if (changed && logger.isDebugEnabled()) {
            // Log only the deltas, not the full sets.
            Set<Host> changedHostsUp = new HashSet<>(hostsUp);
            changedHostsUp.removeAll(activeHosts);
            changedHostsUp.forEach(x -> logger.debug("New host up: {}", x.getHostAddress()));
            Set<Host> changedHostsDown = new HashSet<>(hostsDown);
            changedHostsDown.removeAll(inactiveHosts);
            changedHostsDown.forEach(x -> logger.debug("New host down: {}", x.getHostAddress()));
        }
        return changed;
    }

    /**
     * Helper method that actually changes the state of the class to reflect the new set of hosts up and down.
     * Note that a new HostStatusTracker is returned that holds the new state. Calling classes must update their
     * references to use the new HostStatusTracker.
     *
     * @param hostsUp   hosts currently reported up
     * @param hostsDown hosts currently reported down
     * @return a new tracker reflecting the updated topology
     */
    public HostStatusTracker computeNewHostStatus(Collection<Host> hostsUp, Collection<Host> hostsDown) {

        verifyMutuallyExclusive(hostsUp, hostsDown);

        Set<Host> nextActiveHosts = new HashSet<Host>(hostsUp);

        // Get the hosts that are currently down
        Set<Host> nextInactiveHosts = new HashSet<Host>(hostsDown);

        // add any previous hosts that were down iff they are still reported by the HostSupplier
        Set<Host> union = new HashSet<>(hostsUp);
        union.addAll(hostsDown);
        if (!union.containsAll(inactiveHosts)) {
            logger.info("REMOVING at least one inactive host from {} b/c it is no longer reported by HostSupplier",
                    inactiveHosts);
            // NOTE(review): this mutates THIS tracker's inactive set in place,
            // not just the new tracker being built — intentional? confirm.
            inactiveHosts.retainAll(union);
        }
        nextInactiveHosts.addAll(inactiveHosts);

        // Now remove from the total set of inactive hosts any host that is currently up.
        // This typically happens when a host moves from the inactive state to the active state,
        // and hence it will be in the prev inactive set and also in the new active set
        // for this round.
        for (Host host : nextActiveHosts) {
            nextInactiveHosts.remove(host);
        }

        // Now add any host that is not in the new active hosts set and that was in the previous active set
        Set<Host> prevActiveHosts = new HashSet<Host>(activeHosts);
        prevActiveHosts.removeAll(hostsUp);

        // If anyone is remaining in the prev set then add it to the inactive set, since it has gone away
        nextInactiveHosts.addAll(prevActiveHosts);

        for (Host host : nextActiveHosts) {
            host.setStatus(Status.Up);
        }
        for (Host host : nextInactiveHosts) {
            host.setStatus(Status.Down);
        }
        return new HostStatusTracker(nextActiveHosts, nextInactiveHosts);
    }

    public boolean isHostUp(Host host) {
        return activeHosts.contains(host);
    }

    public Collection<Host> getActiveHosts() {
        return activeHosts;
    }

    public Collection<Host> getInactiveHosts() {
        return inactiveHosts;
    }

    /**
     * Returns the total number of hosts being tracked by this instance. Note that this is calculated
     * on every invocation.
     *
     * @return Integer
     */
    public int getHostCount() {
        // The host collections are never null since they are initialized during construction of this instance.
        return activeHosts.size() + inactiveHosts.size();
    }

    public String toString() {
        return "HostStatusTracker \nactiveSet: " + activeHosts.toString() + "\ninactiveSet: " + inactiveHosts.toString();
    }
}
| 6,039 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/LastOperationMonitor.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.dyno.connectionpool.OperationMonitor;
/**
* Simple in memory map based impl of {@link OperationMonitor}
* Mainly used for testing.
*
* @author poberai
*/
public class LastOperationMonitor implements OperationMonitor {

    // Last observed latency (in ms) per operation name.
    private final ConcurrentHashMap<String, Long> latestTimings = new ConcurrentHashMap<String, Long>();
    // Success counters, keyed by opName (or opName_compressionEnabled).
    private final ConcurrentHashMap<String, AtomicInteger> opCounters = new ConcurrentHashMap<String, AtomicInteger>();
    // Failure counters, keyed the same way.
    private final ConcurrentHashMap<String, AtomicInteger> opFailureCounters = new ConcurrentHashMap<String, AtomicInteger>();

    @Override
    public void recordLatency(String opName, long duration, TimeUnit unit) {
        latestTimings.put(opName, TimeUnit.MILLISECONDS.convert(duration, unit));
    }

    @Override
    public void recordSuccess(String opName) {
        // computeIfAbsent closes the check-then-act race the old get()/put()
        // sequence had (two threads could each install a fresh counter, losing a count).
        opCounters.computeIfAbsent(opName, k -> new AtomicInteger(0)).incrementAndGet();
    }

    @Override
    public void recordSuccess(String opName, boolean compressionEnabled) {
        opCounters.computeIfAbsent(opName + "_" + compressionEnabled, k -> new AtomicInteger(0)).incrementAndGet();
    }

    @Override
    public void recordFailure(String opName, String reason) {
        opFailureCounters.computeIfAbsent(opName, k -> new AtomicInteger(0)).incrementAndGet();
    }

    @Override
    public void recordFailure(String opName, boolean compressionEnabled, String reason) {
        // BUG FIX: previously this incremented opCounters (the SUCCESS map),
        // polluting success stats with failures; count failures in opFailureCounters.
        opFailureCounters.computeIfAbsent(opName + "_" + compressionEnabled, k -> new AtomicInteger(0)).incrementAndGet();
    }

    public Integer getSuccessCount(String opName) {
        return opCounters.get(opName).get();
    }

    public Integer getSuccessCount(String opName, boolean compressionEnabled) {
        return opCounters.get(opName + "_" + compressionEnabled).get();
    }
}
| 6,040 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/HostsUpdater.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
public class HostsUpdater {

    // BUG FIX: logger was created with ConnectionPoolImpl.class, so this class's
    // log lines were attributed to the wrong logger/category.
    private static final Logger Logger = LoggerFactory.getLogger(HostsUpdater.class);

    private final HostSupplier hostSupplier;
    private final TokenMapSupplier tokenMapSupplier;
    private final AtomicBoolean stop = new AtomicBoolean(false);
    private final AtomicReference<HostStatusTracker> hostTracker = new AtomicReference<HostStatusTracker>(null);

    public HostsUpdater(HostSupplier hSupplier, TokenMapSupplier tokenMapSupplier) {
        this.hostSupplier = hSupplier;
        this.tokenMapSupplier = tokenMapSupplier;
        this.hostTracker.set(new HostStatusTracker());
    }

    /**
     * Polls the {@link HostSupplier} and {@link TokenMapSupplier}, reconciles the
     * two views and returns an updated {@link HostStatusTracker}.
     *
     * @return the current tracker (possibly unchanged), or null if stopped/interrupted
     * @throws NoAvailableHostsException if the HostSupplier reports no hosts
     * @throws DynoException             if the TokenMapSupplier is missing, empty,
     *                                   or lacks an up host that the HostSupplier reports
     */
    public HostStatusTracker refreshHosts() {

        if (stop.get() || Thread.currentThread().isInterrupted()) {
            return null;
        }

        List<Host> allHostsFromHostSupplier = hostSupplier.getHosts();
        if (allHostsFromHostSupplier == null || allHostsFromHostSupplier.isEmpty()) {
            throw new NoAvailableHostsException("No available hosts when starting HostsUpdater");
        }

        List<Host> hostsUpFromHostSupplier = new ArrayList<>();
        List<Host> hostsDownFromHostSupplier = new ArrayList<>();

        for (Host host : allHostsFromHostSupplier) {
            if (host.isUp()) {
                hostsUpFromHostSupplier.add(host);
            } else {
                hostsDownFromHostSupplier.add(host);
            }
        }

        // if nothing has changed, just return the earlier hosttracker.
        if (!hostTracker.get().checkIfChanged(new HashSet<>(hostsUpFromHostSupplier), new HashSet<>(hostsDownFromHostSupplier))) {
            return hostTracker.get();
        }

        /**
         * HostTracker should return the hosts that we get from TokenMapSupplier.
         * Hence get the hosts from HostSupplier and map them to TokenMapSupplier
         * and return them.
         */
        Collections.sort(allHostsFromHostSupplier);
        Set<Host> hostSet = new HashSet<>(allHostsFromHostSupplier);

        // Create a list of host/Tokens
        List<HostToken> hostTokens;
        if (tokenMapSupplier != null) {
            Logger.info("Getting Hosts from TokenMapSupplier");
            hostTokens = tokenMapSupplier.getTokens(hostSet);
            if (hostTokens.isEmpty()) {
                throw new DynoException("No hosts in the TokenMapSupplier");
            }
        } else {
            throw new DynoException("TokenMapSupplier not provided");
        }

        // The key here really needs to be an object that is overlapping between
        // the host from HostSupplier and TokenMapSupplier. Since that is a
        // subset of the Host object itself, Host is the key as well as value here.
        Map<Host, Host> allHostSetFromTokenMapSupplier = new HashMap<>();
        for (HostToken ht : hostTokens) {
            allHostSetFromTokenMapSupplier.put(ht.getHost(), ht.getHost());
        }

        hostsUpFromHostSupplier.clear();
        hostsDownFromHostSupplier.clear();

        for (Host hostFromHostSupplier : allHostsFromHostSupplier) {
            if (hostFromHostSupplier.isUp()) {
                Host hostFromTokenMapSupplier = allHostSetFromTokenMapSupplier.get(hostFromHostSupplier);
                if (hostFromTokenMapSupplier == null) {
                    throw new DynoException("Could not find " + hostFromHostSupplier.getHostName() + " in token map supplier.");
                }

                // XXX: There's slight subtlety here. We get the hostname and IPAddress from 'hostFromHostSupplier'
                // which is derived from the HostSupplier, whereas we get everything else from the 'hostFromTokenMapSupplier'
                // which is basically derived from the AbstractTokenMapSupplier.
                // The reason for this subtlety is due to the fact that the host supplier returns the preferred IPAddress
                // and the AbstractTokenMapSupplier may return an alternative IPAddress (public IP vs. private IP) that
                // we may not be able to access. (The same applies to the following 'else' case.)
                //
                // Why does the AbstractTokenMapSupplier return public IPs?
                // This is because the AbstractTokenMapSupplier derives its values from dynomite-manager which
                // returns the topology seen by it and the server processes, and they use Public IPs to communicate with
                // each other.
                // TODO: Can we ensure that both the HostSupplier and AbstractTokenMapSupplier agree on the same values?
                HostBuilder upHostBuilder = new HostBuilder()
                        .setHostname(hostFromHostSupplier.getHostName())
                        .setIpAddress(hostFromHostSupplier.getIpAddress())
                        .setStatus(Host.Status.Up);
                upHostBuilder.setPort(hostFromTokenMapSupplier.getPort())
                        .setSecurePort(hostFromTokenMapSupplier.getSecurePort())
                        .setDatastorePort(hostFromTokenMapSupplier.getDatastorePort())
                        .setRack(hostFromTokenMapSupplier.getRack())
                        .setDatacenter(hostFromTokenMapSupplier.getDatacenter())
                        .setHashtag(hostFromHostSupplier.getHashtag())
                        .setPassword(hostFromTokenMapSupplier.getPassword());
                hostsUpFromHostSupplier.add(upHostBuilder.createHost());
                allHostSetFromTokenMapSupplier.remove(hostFromTokenMapSupplier);
            } else {
                Host hostFromTokenMapSupplier = allHostSetFromTokenMapSupplier.get(hostFromHostSupplier);

                // BUG FIX: unlike the 'up' branch, a down host may legitimately be
                // absent from the TokenMapSupplier; the old code dereferenced the
                // null and threw an NPE. Track the HostSupplier's view as down instead.
                if (hostFromTokenMapSupplier == null) {
                    hostsDownFromHostSupplier.add(Host.clone(hostFromHostSupplier).setStatus(Host.Status.Down));
                    continue;
                }

                HostBuilder downHostBuilder = new HostBuilder()
                        .setHostname(hostFromHostSupplier.getHostName())
                        .setIpAddress(hostFromHostSupplier.getIpAddress())
                        .setStatus(Host.Status.Down);
                downHostBuilder.setPort(hostFromTokenMapSupplier.getPort())
                        .setSecurePort(hostFromTokenMapSupplier.getSecurePort())
                        .setDatastorePort(hostFromTokenMapSupplier.getDatastorePort())
                        .setRack(hostFromTokenMapSupplier.getRack())
                        .setDatacenter(hostFromTokenMapSupplier.getDatacenter())
                        .setHashtag(hostFromHostSupplier.getHashtag())
                        .setPassword(hostFromTokenMapSupplier.getPassword());
                hostsDownFromHostSupplier.add(downHostBuilder.createHost());
                allHostSetFromTokenMapSupplier.remove(hostFromTokenMapSupplier);
            }
        }

        // if a node is down, it might be absent in hostSupplier but has its presence in TokenMapSupplier.
        // Add that host to the down list here.
        for (Host h : allHostSetFromTokenMapSupplier.keySet()) {
            hostsDownFromHostSupplier.add(Host.clone(h).setStatus(Host.Status.Down));
        }

        HostStatusTracker newTracker = hostTracker.get().computeNewHostStatus(hostsUpFromHostSupplier, hostsDownFromHostSupplier);
        hostTracker.set(newTracker);
        return hostTracker.get();
    }

    public void stop() {
        stop.set(true);
    }
}
| 6,041 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/HostConnectionPoolImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionFactory;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.RetryPolicy;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.PoolExhaustedException;
import com.netflix.dyno.connectionpool.exception.PoolOfflineException;
import com.netflix.dyno.connectionpool.exception.PoolTimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
/**
* Main impl for {@link HostConnectionPool}
* <p>
* This class does not allow shared access to the connections being managed for this pool.
* Hence it uses a {@link LinkedBlockingQueue} to manage the available connections.
* When a connection needs to be borrowed, we wait or poll the queue. As connections are returned, they are added back into the queue.
* This is the normal behavior during the "Active" state of this pool.
* <p>
* The class also manages another state called "Inactive" where it can be put "Down" where it stops accepting requests for borrowing more connections,
* and simply terminates every connection that is returned to it. This is generally useful when the host is going away, or where the error rate
* from the connections of this pool are greater than a configured error threshold and then an external component decides to recycle the connection pool.
*
* @param <CL>
* @author poberai
*/
public class HostConnectionPoolImpl<CL> implements HostConnectionPool<CL> {
private static final Logger Logger = LoggerFactory.getLogger(HostConnectionPoolImpl.class);
private static final int CONNECTION_CREATE_RETRY_CNT = 3;
// The connections available for this connection pool
private final LinkedBlockingQueue<Connection<CL>> availableConnections = new LinkedBlockingQueue<Connection<CL>>();
// Track the no of connections open (both available and in use)
private final AtomicInteger numActiveConnections = new AtomicInteger(0);
// Private members required by this class
private final Host host;
private final ConnectionFactory<CL> connFactory;
private final ConnectionPoolConfiguration cpConfig;
private final ConnectionPoolMonitor monitor;
// states that dictate the behavior of the pool
// cp not inited is the starting state of the pool. The pool will not allow connections to be borrowed in this state
private final ConnectionPoolState<CL> cpNotInited = new ConnectionPoolNotInited();
// cp active is where connections of the pool can be borrowed and returned
private final ConnectionPoolState<CL> cpActive = new ConnectionPoolActive(this);
// cp reconnecting is where connections cannot be borrowed and all returning connections will be shutdown
private final ConnectionPoolState<CL> cpReconnecting = new ConnectionPoolReconnectingOrDown();
// similar to reconnecting
private final ConnectionPoolState<CL> cpDown = new ConnectionPoolReconnectingOrDown();
// The thread safe reference to the pool state
private final AtomicReference<ConnectionPoolState<CL>> cpState = new AtomicReference<ConnectionPoolState<CL>>(cpNotInited);
// Wires in the host this pool serves, the factory used to open connections,
// the pool configuration (sizing/timeouts) and the monitor for metrics.
// The pool starts in the "not inited" state; call primeConnections() to activate.
public HostConnectionPoolImpl(Host host, ConnectionFactory<CL> conFactory,
ConnectionPoolConfiguration cpConfig, ConnectionPoolMonitor poolMonitor) {
this.host = host;
this.connFactory = conFactory;
this.cpConfig = cpConfig;
this.monitor = poolMonitor;
}
@Override
public Connection<CL> borrowConnection(int duration, TimeUnit unit) throws DynoException {
    // Behavior depends entirely on the pool's current state (active/down/...).
    final ConnectionPoolState<CL> current = cpState.get();
    return current.borrowConnection(duration, unit);
}
@Override
public boolean returnConnection(Connection<CL> connection) {
    // Delegate to the current state; a down pool closes returned connections.
    final ConnectionPoolState<CL> current = cpState.get();
    return current.returnConnection(connection);
}
@Override
public boolean closeConnection(Connection<CL> connection) {
    // Delegate to the current pool state.
    final ConnectionPoolState<CL> current = cpState.get();
    return current.closeConnection(connection);
}
@Override
public void recycleConnection(Connection<CL> connection) {
    // Delegate to the current pool state.
    final ConnectionPoolState<CL> current = cpState.get();
    current.recycleConnection(connection);
}
// Transitions the pool to the Down state (idempotent). Uses a CAS so only one
// caller performs the transition and the associated monitor bookkeeping.
@Override
public void markAsDown(DynoException reason) {
if (Logger.isDebugEnabled()) {
Logger.debug(String.format("Marking Host Connection Pool %s DOWN", getHost()));
}
ConnectionPoolState<CL> currentState = cpState.get();
// Already down: nothing to do.
if (currentState == cpDown) {
if (Logger.isDebugEnabled()) {
Logger.debug("CP is already down, hence ignoring mark as down request");
}
return;
}
// CAS from whatever state we observed to Down; losing the race means another
// thread completed the transition (and its side effects) first.
if (!(cpState.compareAndSet(currentState, cpDown))) {
// someone already beat us to it
return;
}
monitor.hostDown(host, reason);
// Resets borrow-latency stats on the shared monitor — deliberate side effect.
monitor.resetConnectionBorrowedLatStats(); // NOTE - SIDE EFFECT
}
// Forces the pool down, then rebuilds its connections from the Down state.
// Only reports the host as up again if the rebuild reached the Active state.
@Override
public void reconnect() {
markAsDown(null);
reconnect(cpDown);
if (cpState.get() == cpActive) {
monitor.hostUp(host, this);
}
}
@Override
public void shutdown() {
    // Flip to Down first so no new borrows succeed, then close every
    // connection still sitting in the available queue.
    Logger.info("Shutting down connection pool for host:" + host);
    cpState.set(cpDown);
    Connection<CL> connection;
    while ((connection = availableConnections.poll()) != null) {
        cpState.get().closeConnection(connection);
    }
}
// One-time pool activation: opens maxConnsPerHost connections from the
// NotInited state. Throws if the pool was already inited or if zero
// connections could be established (state is reset to NotInited in that case).
@Override
public int primeConnections() throws DynoException {
Logger.info("Priming connection pool for host:" + host + ", with conns:" + cpConfig.getMaxConnsPerHost());
if (cpState.get() != cpNotInited) {
throw new DynoException("Connection pool has already been inited, cannot prime connections for host:" + host);
}
int primedConnectionCount = reconnect(cpNotInited);
if (primedConnectionCount == 0) {
Logger.warn("Unable to make any successful connections to host " + host);
cpState.set(cpNotInited);
throw new DynoConnectException("Unable to make ANY successful connections to host " + host);
}
return primedConnectionCount;
}
// Rebuilds the pool's connections. CAS from the expected previous state into
// Reconnecting guarantees only one thread performs the rebuild; the pool ends
// in Active only if ALL maxConnsPerHost connections were created, else Down.
// Returns the number of connections successfully created (0 if we lost the CAS).
private int reconnect(ConnectionPoolState<CL> prevState) throws DynoException {
if (!(cpState.compareAndSet(prevState, cpReconnecting))) {
Logger.info("Reconnect connections already called by someone else, ignoring reconnect connections request");
return 0;
}
int successfullyCreated = 0;
for (int i = 0; i < cpConfig.getMaxConnsPerHost(); i++) {
boolean success = createConnectionWithRetries();
if (success) {
successfullyCreated++;
}
}
// All-or-nothing: any failed slot leaves the pool Down.
if (successfullyCreated == cpConfig.getMaxConnsPerHost()) {
if (!(cpState.compareAndSet(cpReconnecting, cpActive))) {
throw new IllegalStateException("something went wrong with prime connections");
}
} else {
if (!(cpState.compareAndSet(cpReconnecting, cpDown))) {
throw new IllegalStateException("something went wrong with prime connections");
}
}
return successfullyCreated;
}
/**
 * Attempts to create one connection, retrying up to CONNECTION_CREATE_RETRY_CNT
 * times on DynoException. Returns true on the first success, false if all
 * attempts failed.
 */
private boolean createConnectionWithRetries() {
    final RetryPolicy retry = new RetryNTimes.RetryFactory(CONNECTION_CREATE_RETRY_CNT).getRetryPolicy();
    retry.begin();
    while (retry.allowRetry()) {
        try {
            cpActive.createConnection();
            retry.success();
            return true;
        } catch (DynoException e) {
            retry.failure(e);
        }
    }
    return false;
}
@Override
public Host getHost() {
    // The single host this pool manages connections for.
    return this.host;
}
@Override
public boolean isActive() {
    // True only while the pool is in the Active state.
    return this.cpState.get() == this.cpActive;
}
@Override
public boolean isShutdown() {
    // True only while the pool is in the Down state.
    return this.cpState.get() == this.cpDown;
}
/**
 * DO NOT call this method on this pool. This pool needs to manage shared thread safe access to connections
 * and hence at any given time all connections are being used by some operation.
 * In any case getAllConnections() is meant for ping based active monitoring of connections which is not needed for this
 * pool since it is "sync" in nature. For sync pools we collect feedback from the operations directly and relay that to
 * ConnectionPoolHealthChecker.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Collection<Connection<CL>> getAllConnections() {
    // UnsupportedOperationException is the idiomatic type for an unimplemented
    // operation; it subclasses RuntimeException, so existing catch blocks still match.
    throw new UnsupportedOperationException("Not Implemented");
}
@Override
public int getConnectionTimeout() {
    // Connect timeout comes straight from the pool configuration.
    return this.cpConfig.getConnectTimeout();
}
@Override
public int getSocketTimeout() {
    // Socket timeout comes straight from the pool configuration.
    return this.cpConfig.getSocketTimeout();
}
@Override
public int size() {
    // Connection count as reported by the current pool state.
    final ConnectionPoolState<CL> current = cpState.get();
    return current.connectionsCount();
}
// State-pattern interface: each pool state (not-inited / active / down-or-reconnecting)
// supplies its own behavior for the connection lifecycle operations below.
private interface ConnectionPoolState<CL> {

    // Opens a new connection and registers it with the pool.
    Connection<CL> createConnection();

    // Hands out a connection, waiting up to the given duration.
    Connection<CL> borrowConnection(int duration, TimeUnit unit);

    // Accepts a previously borrowed connection back.
    boolean returnConnection(Connection<CL> connection);

    // Permanently closes the given connection.
    boolean closeConnection(Connection<CL> connection);

    // Closes the given connection and replaces it with a fresh one.
    void recycleConnection(Connection<CL> connection);

    // Number of connections this state is tracking.
    int connectionsCount();
}
private class ConnectionPoolActive implements ConnectionPoolState<CL> {
private final HostConnectionPoolImpl<CL> pool;
// Keeps a reference to the owning pool so new connections can be parented to it.
private ConnectionPoolActive(HostConnectionPoolImpl<CL> cp) {
pool = cp;
}
// Opens a new connection (variant chosen by configuration: datastore /
// consistency-level / plain), adds it to the available queue, and updates
// monitor counters. On failure, records the failure and rethrows as
// DynoConnectException.
@Override
public Connection<CL> createConnection() {
try {
Connection<CL> connection;
// Pick the factory method matching the configured connection flavor.
if (cpConfig.isConnectToDatastore()) {
connection = connFactory.createConnectionWithDataStore(pool);
} else if (cpConfig.isConnectionPoolConsistencyProvided()) {
connection = connFactory.createConnectionWithConsistencyLevel(pool, cpConfig.getConnectionPoolConsistency());
} else {
connection = connFactory.createConnection(pool);
}
connection.open();
availableConnections.add(connection);
monitor.incConnectionCreated(host);
numActiveConnections.incrementAndGet();
return connection;
} catch (DynoConnectException e) {
/* adding error log under debug flag to avoid flooding log lines
while debugging specific error scenarios.
*/
// Sampled logging: only every 10000th failure is logged at error level.
if (Logger.isDebugEnabled()) {
if (monitor.getConnectionCreateFailedCount() % 10000 == 0) {
Logger.error("Failed to create connection", e);
}
}
monitor.incConnectionCreateFailed(host, e);
throw e;
} catch (RuntimeException e) {
// Non-Dyno runtime failures are wrapped so callers see a single exception type.
if (monitor.getConnectionCreateFailedCount() % 10000 == 0) {
Logger.error("Failed to create connection", e);
}
monitor.incConnectionCreateFailed(host, e);
throw new DynoConnectException(e);
}
}
@Override
public boolean returnConnection(Connection<CL> connection) {
try {
if (numActiveConnections.get() > cpConfig.getMaxConnsPerHost()) {
// Just close the connection
return closeConnection(connection);
} else {
// Add the given connection back to the pool
availableConnections.add(connection);
return false;
}
} finally {
monitor.incConnectionReturned(host);
}
}
@Override
public boolean closeConnection(Connection<CL> connection) {
try {
connection.close();
return true;
} catch (Exception e) {
Logger.error("Failed to close connection for host: " + host + " " + e.getMessage());
return false;
} finally {
numActiveConnections.decrementAndGet();
monitor.incConnectionClosed(host, connection.getLastException());
}
}
@Override
public void recycleConnection(Connection<CL> connection) {
this.closeConnection(connection);
monitor.incConnectionReturned(host);
// Create a new connection and add it to pool
if (createConnectionWithRetries()) {
monitor.incConnectionRecycled(host);
} else {
Logger.error("Connection recycle failed to create a new connection");
}
}
@Override
public int connectionsCount() {
return numActiveConnections.get();
}
@Override
public Connection<CL> borrowConnection(int duration, TimeUnit unit) {
if (numActiveConnections.get() < 1) {
// Need to throw something other than DynoConnectException in order to bubble past HostSelectionWithFallback
// Is that the right thing to do ???
throw new PoolExhaustedException(HostConnectionPoolImpl.this,
"Fast fail - NO ACTIVE CONNECTIONS in pool").setHost(getHost());
}
// Start recording how long it takes to get the connection - for insight/metrics
long startTime = System.nanoTime() / 1000;
Connection<CL> conn = null;
try {
// wait on the connection pool with a timeout
conn = availableConnections.poll(duration, unit);
} catch (InterruptedException e) {
Logger.info("Thread interrupted when waiting on connections");
throw new DynoConnectException(e);
}
long delay = System.nanoTime() / 1000 - startTime;
if (conn == null) {
throw new PoolTimeoutException("Fast fail waiting for connection from pool")
.setHost(getHost())
.setLatency(delay);
}
monitor.incConnectionBorrowed(host, delay);
return conn;
}
}
/**
 * State object used while the pool is reconnecting or down: creating and borrowing
 * are rejected with PoolOfflineException, while returned connections are drained
 * (closed and reported to the monitor) so nothing leaks during the outage.
 */
private class ConnectionPoolReconnectingOrDown implements ConnectionPoolState<CL> {

    private ConnectionPoolReconnectingOrDown() {
    }

    @Override
    public Connection<CL> createConnection() {
        throw new PoolOfflineException(getHost(), "Cannot create new connection when pool is down");
    }

    @Override
    public Connection<CL> borrowConnection(int duration, TimeUnit unit) {
        throw new PoolOfflineException(getHost(), "Cannot borrow connection when pool is down");
    }

    @Override
    public boolean returnConnection(Connection<CL> connection) {
        // Nothing is pooled while down; count the return and drain the connection.
        monitor.incConnectionReturned(host);
        return closeConnection(connection);
    }

    @Override
    public boolean closeConnection(Connection<CL> connection) {
        boolean closed = false;
        try {
            connection.close();
            closed = true;
        } catch (Exception e) {
            Logger.warn("Failed to close connection for host: " + host + " " + e.getMessage());
        } finally {
            // Bookkeeping must run whether or not close() succeeded.
            numActiveConnections.decrementAndGet();
            monitor.incConnectionClosed(host, connection.getLastException());
        }
        return closed;
    }

    @Override
    public void recycleConnection(Connection<CL> connection) {
        // No replacement is created while the pool is down; just drop the connection.
        closeConnection(connection);
    }

    @Override
    public int connectionsCount() {
        return 0;
    }
}
/**
 * State object for a pool that has not been primed yet: every connection
 * operation is rejected until the pool goes through initialization.
 */
private class ConnectionPoolNotInited implements ConnectionPoolState<CL> {

    private ConnectionPoolNotInited() {
    }

    // Every operation fails the same way; build the exception in one place.
    private DynoConnectException notInited() {
        return new DynoConnectException("Pool must be initialized first");
    }

    @Override
    public Connection<CL> createConnection() {
        throw notInited();
    }

    @Override
    public Connection<CL> borrowConnection(int duration, TimeUnit unit) {
        throw notInited();
    }

    @Override
    public boolean returnConnection(Connection<CL> connection) {
        throw notInited();
    }

    @Override
    public boolean closeConnection(Connection<CL> connection) {
        throw notInited();
    }

    @Override
    public void recycleConnection(Connection<CL> connection) {
        throw notInited();
    }

    @Override
    public int connectionsCount() {
        return 0;
    }
}
/**
 * Human-readable summary of this pool: its host and whether it is currently active.
 * Fixed: added the missing @Override annotation (overrides Object.toString).
 */
@Override
public String toString() {
    return "HostConnectionPool: [Host: " + host + ", Pool active: " + isActive() + "]";
}
}
| 6,042 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/CountingConnectionPoolMonitor.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.HostConnectionStats;
import com.netflix.dyno.connectionpool.exception.BadRequestException;
import com.netflix.dyno.connectionpool.exception.FatalConnectionException;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.exception.PoolExhaustedException;
import com.netflix.dyno.connectionpool.exception.PoolTimeoutException;
import com.netflix.dyno.connectionpool.exception.TimeoutException;
import com.netflix.dyno.connectionpool.impl.utils.EstimatedHistogram;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
/**
* Impl of {@link ConnectionPoolMonitor} using thread safe AtomicLongs
*
* @author poberai
*/
public class CountingConnectionPoolMonitor implements ConnectionPoolMonitor {

    // Tracking operation level metrics
    private final AtomicLong operationFailureCount = new AtomicLong();
    private final AtomicLong operationSuccessCount = new AtomicLong();

    // Tracking connection counts
    private final AtomicLong connectionCreateCount = new AtomicLong();
    private final AtomicLong connectionClosedCount = new AtomicLong();
    private final AtomicLong connectionCreateFailureCount = new AtomicLong();
    private final AtomicLong connectionBorrowCount = new AtomicLong();
    private final AtomicLong connectionReturnCount = new AtomicLong();
    private final AtomicLong connectionRecycledCount = new AtomicLong();

    // Error-classification counters, bumped by trackError().
    private final AtomicLong operationFailoverCount = new AtomicLong();
    private final AtomicLong poolTimeoutCount = new AtomicLong();
    private final AtomicLong poolExhaustedCount = new AtomicLong();
    private final AtomicLong operationTimeoutCount = new AtomicLong();
    private final AtomicLong socketTimeoutCount = new AtomicLong();
    private final AtomicLong noHostsCount = new AtomicLong();
    private final AtomicLong unknownErrorCount = new AtomicLong();
    private final AtomicLong badRequestCount = new AtomicLong();

    // Histogram of connection borrow latencies.
    private final EstimatedHistogram borrowedConnHistogram = new EstimatedHistogram();

    // Use an explicit host count rather than relying on hostStats
    // being synchronized with the HostSupplier counts. One case
    // where we can get out of sync is node replacement. In that case
    // the host will be removed from the HostSupplier but will still be
    // in our hostStats as 'down'.
    private final AtomicLong hostSupplierCount = new AtomicLong();

    private final ConcurrentHashMap<Host, HostConnectionStats> hostStats = new ConcurrentHashMap<Host, HostConnectionStats>();

    public CountingConnectionPoolMonitor() {
    }

    /**
     * Buckets the failure into the matching error counter and charges the failure
     * to the host's per-host stats (when a host is known).
     * A null reason counts as an unknown error (instanceof is false for null).
     */
    private void trackError(Host host, Exception reason) {
        if (reason instanceof PoolTimeoutException) {
            this.poolTimeoutCount.incrementAndGet();
        } else if (reason instanceof PoolExhaustedException) {
            this.poolExhaustedCount.incrementAndGet();
        } else if (reason instanceof TimeoutException) {
            this.socketTimeoutCount.incrementAndGet();
        } else if (reason instanceof FatalConnectionException) {
            // Fatal connection problems are reported on the socket-timeout counter as well.
            this.socketTimeoutCount.incrementAndGet();
        } else if (reason instanceof BadRequestException) {
            this.badRequestCount.incrementAndGet();
        } else if (reason instanceof NoAvailableHostsException) {
            this.noHostsCount.incrementAndGet();
        } else {
            // Unrecognized exception type, or a null reason.
            this.unknownErrorCount.incrementAndGet();
        }
        if (host != null) {
            getOrCreateHostStats(host).opFailure.incrementAndGet();
        }
    }

    @Override
    public void setHostCount(long hostCount) {
        this.hostSupplierCount.set(hostCount);
    }

    @Override
    public void incOperationFailure(Host host, Exception reason) {
        this.operationFailureCount.incrementAndGet();
        trackError(host, reason);
    }

    public long getOperationFailureCount() {
        return this.operationFailureCount.get();
    }

    @Override
    public void incOperationSuccess(Host host, long latency) {
        this.operationSuccessCount.incrementAndGet();
        getOrCreateHostStats(host).opSuccess.incrementAndGet();
    }

    public long getOperationSuccessCount() {
        return this.operationSuccessCount.get();
    }

    @Override
    public void incConnectionCreated(Host host) {
        this.connectionCreateCount.incrementAndGet();
        getOrCreateHostStats(host).created.incrementAndGet();
    }

    public long getConnectionCreatedCount() {
        return this.connectionCreateCount.get();
    }

    @Override
    public void incConnectionClosed(Host host, Exception reason) {
        this.connectionClosedCount.incrementAndGet();
        getOrCreateHostStats(host).closed.incrementAndGet();
    }

    public long getConnectionClosedCount() {
        return this.connectionClosedCount.get();
    }

    @Override
    public void incConnectionCreateFailed(Host host, Exception reason) {
        this.connectionCreateFailureCount.incrementAndGet();
        getOrCreateHostStats(host).createFailed.incrementAndGet();
    }

    public long getConnectionCreateFailedCount() {
        return this.connectionCreateFailureCount.get();
    }

    /**
     * Records a borrow plus its latency. A null host still counts toward the
     * global counters but is not charged to any per-host stats.
     */
    @Override
    public void incConnectionBorrowed(Host host, long delay) {
        this.connectionBorrowCount.incrementAndGet();
        this.borrowedConnHistogram.add(delay);
        if (host == null) {
            return;
        }
        getOrCreateHostStats(host).borrowed.incrementAndGet();
    }

    @Override
    public long getConnectionBorrowedLatMean() {
        return borrowedConnHistogram.mean();
    }

    @Override
    public long getConnectionBorrowedLatP50() {
        return borrowedConnHistogram.percentile(0.5);
    }

    @Override
    public long getConnectionBorrowedLatP99() {
        return borrowedConnHistogram.percentile(0.99);
    }

    public long getConnectionBorrowedCount() {
        return this.connectionBorrowCount.get();
    }

    @Override
    public void resetConnectionBorrowedLatStats() {
        // getBuckets(true) reads-and-resets the histogram buckets; the returned
        // snapshot is intentionally discarded here.
        this.borrowedConnHistogram.getBuckets(true);
    }

    @Override
    public void incConnectionReturned(Host host) {
        this.connectionReturnCount.incrementAndGet();
        if (host == null) {
            return;
        }
        getOrCreateHostStats(host).returned.incrementAndGet();
    }

    public long getConnectionReturnedCount() {
        return this.connectionReturnCount.get();
    }

    @Override
    public void incConnectionRecycled(Host host) {
        // Recycles are tracked globally only; there is no per-host recycle stat.
        this.connectionRecycledCount.incrementAndGet();
    }

    @Override
    public long getConnectionRecycledCount() {
        return this.connectionRecycledCount.get();
    }

    public long getPoolExhaustedTimeoutCount() {
        return this.poolExhaustedCount.get();
    }

    @Override
    public long getSocketTimeoutCount() {
        return this.socketTimeoutCount.get();
    }

    public long getOperationTimeoutCount() {
        return this.operationTimeoutCount.get();
    }

    @Override
    public void incFailover(Host host, Exception reason) {
        this.operationFailoverCount.incrementAndGet();
    }

    @Override
    public long getFailoverCount() {
        return this.operationFailoverCount.get();
    }

    @Override
    public long getNoHostCount() {
        return this.noHostsCount.get();
    }

    @Override
    public long getUnknownErrorCount() {
        return this.unknownErrorCount.get();
    }

    @Override
    public long getBadRequestCount() {
        return this.badRequestCount.get();
    }

    /** Connections currently handed out: borrows minus returns. */
    public long getNumBusyConnections() {
        return this.connectionBorrowCount.get() - this.connectionReturnCount.get();
    }

    /** Connections currently open: creates minus closes. */
    public long getNumOpenConnections() {
        return this.connectionCreateCount.get() - this.connectionClosedCount.get();
    }

    // Fixed: added the missing @Override annotation (overrides Object.toString).
    @Override
    public String toString() {
        // Build the complete status string
        return new StringBuilder()
                .append("CountingConnectionPoolMonitor(")
                .append("Connections[")
                .append("open=").append(getNumOpenConnections())
                .append(",busy=").append(getNumBusyConnections())
                .append(",create=").append(connectionCreateCount.get())
                .append(",close=").append(connectionClosedCount.get())
                .append(",createFailed=").append(connectionCreateFailureCount.get())
                .append(",borrow=").append(connectionBorrowCount.get())
                .append(",return=").append(connectionReturnCount.get())
                .append(",recycle=").append(connectionRecycledCount.get())
                .append("], Operations[")
                .append("success=").append(operationSuccessCount.get())
                .append(",failure=").append(operationFailureCount.get())
                .append(",optimeout=").append(operationTimeoutCount.get())
                .append(",timeout=").append(socketTimeoutCount.get())
                .append(",failover=").append(operationFailoverCount.get())
                .append(",nohosts=").append(noHostsCount.get())
                .append(",unknown=").append(unknownErrorCount.get())
                .append(",exhausted=").append(poolExhaustedCount.get())
                .append("], Hosts[")
                .append("up=").append(getHostUpCount())
                .append(",down=").append(getHostDownCount())
                .append("])").toString();
    }

    @Override
    public long getHostCount() {
        return this.hostSupplierCount.get();
    }

    @Override
    public long getHostUpCount() {
        int count = 0;
        for (HostConnectionStats stats : hostStats.values()) {
            count = stats.isHostUp() ? count + 1 : count;
        }
        return count;
    }

    @Override
    public long getHostDownCount() {
        // NOTE(review): derived from the explicit supplier count, so this can go
        // negative if hostStats holds more 'up' hosts than the supplier reports.
        return getHostCount() - getHostUpCount();
    }

    @Override
    public void hostAdded(Host host, HostConnectionPool<?> pool) {
        getOrCreateHostStats(host).hostUp.set(true);
    }

    @Override
    public void hostRemoved(Host host) {
        getOrCreateHostStats(host).hostUp.set(false);
    }

    @Override
    public void hostDown(Host host, Exception reason) {
        getOrCreateHostStats(host).hostUp.set(false);
    }

    @Override
    public void hostUp(Host host, HostConnectionPool<?> pool) {
        getOrCreateHostStats(host).hostUp.set(true);
    }

    @Override
    public Map<Host, HostConnectionStats> getHostStats() {
        // NOTE(review): exposes the live internal map (not a copy) — confirm callers
        // treat it as read-only.
        return hostStats;
    }

    /**
     * Returns the per-host stats holder, creating it on first use.
     * Race-safe: putIfAbsent keeps the winner, the follow-up get returns it.
     */
    public HostConnectionStatsImpl getOrCreateHostStats(Host host) {
        HostConnectionStatsImpl hStats = (HostConnectionStatsImpl) hostStats.get(host);
        if (hStats != null) {
            return hStats;
        }
        hostStats.putIfAbsent(host, new HostConnectionStatsImpl(host));
        return (HostConnectionStatsImpl) hostStats.get(host);
    }

    /**
     * Per-host counter holder.
     * Fixed: declared static — the class never touches the enclosing monitor
     * instance, so it should not carry the implicit outer reference.
     */
    private static class HostConnectionStatsImpl implements HostConnectionStats {

        // Fixed: made final — only the referenced AtomicBoolean's value changes.
        private final AtomicBoolean hostUp = new AtomicBoolean(true);

        private final String name;
        private final AtomicLong opFailure = new AtomicLong();
        private final AtomicLong opSuccess = new AtomicLong();
        private final AtomicLong created = new AtomicLong();
        private final AtomicLong closed = new AtomicLong();
        private final AtomicLong createFailed = new AtomicLong();
        private final AtomicLong borrowed = new AtomicLong();
        private final AtomicLong returned = new AtomicLong();

        private HostConnectionStatsImpl(Host host) {
            this.name = host.getHostAddress();
        }

        @Override
        public boolean isHostUp() {
            return hostUp.get();
        }

        @Override
        public long getConnectionsBorrowed() {
            return borrowed.get();
        }

        @Override
        public long getConnectionsReturned() {
            return returned.get();
        }

        @Override
        public long getConnectionsCreated() {
            return created.get();
        }

        @Override
        public long getConnectionsClosed() {
            return closed.get();
        }

        @Override
        public long getConnectionsCreateFailed() {
            return createFailed.get();
        }

        @Override
        public long getOperationSuccessCount() {
            return opSuccess.get();
        }

        @Override
        public long getOperationErrorCount() {
            return opFailure.get();
        }

        // Fixed: added the missing @Override annotation (overrides Object.toString).
        @Override
        public String toString() {
            return name + " isUp: " + hostUp.get() +
                    ", borrowed: " + borrowed.get() +
                    ", returned: " + returned.get() +
                    ", created: " + created.get() +
                    ", closed: " + closed.get() +
                    ", createFailed: " + createFailed.get() +
                    ", success: " + opSuccess.get() +
                    ", error: " + opFailure.get();
        }
    }
}
| 6,043 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/ConnectionContextImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import com.netflix.dyno.connectionpool.ConnectionContext;
/**
 * {@link ConnectionContext} implementation backed by a concurrent map, so metadata
 * attached to a connection can be read and written safely across threads.
 */
public class ConnectionContextImpl implements ConnectionContext {

    // Per-connection metadata, keyed by caller-chosen names.
    private final ConcurrentHashMap<String, Object> attributes = new ConcurrentHashMap<String, Object>();

    @Override
    public void setMetadata(String key, Object obj) {
        attributes.put(key, obj);
    }

    @Override
    public Object getMetadata(String key) {
        return attributes.get(key);
    }

    @Override
    public boolean hasMetadata(String key) {
        return attributes.containsKey(key);
    }

    @Override
    public void reset() {
        attributes.clear();
    }

    @Override
    public Map<String, Object> getAll() {
        // NOTE(review): returns the live internal map (not a copy) — confirm callers
        // treat it as read-only.
        return attributes;
    }
}
| 6,044 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/FutureOperationalResultImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.ListenableFuture;
import com.netflix.dyno.connectionpool.OperationMonitor;
import com.netflix.dyno.connectionpool.OperationResult;
/**
* Impl for Future<OperationResult<R>> that encapsulates an inner future.
* The class provides a functionality to record the time when the caller calls get() on the future.
* This helps record end-end timing for async operations.
* Not that there is a caveat here that if the future is called at a later point in time, then yes the timing stats
* will appear to be bloated unnecessarily. What we really need here is a listenable future, where we should log the
* timing stats on the callback.
*
* @param <R>
* @author poberai
*/
public class FutureOperationalResultImpl<R> implements ListenableFuture<OperationResult<R>> {

    private final Future<R> future;
    private final OperationResultImpl<R> opResult;
    private final long startTime;
    // Guards against double-counting latency when get() is invoked more than once.
    private final AtomicBoolean timeRecorded = new AtomicBoolean(false);

    /**
     * @param opName    name of the operation being tracked
     * @param rFuture   inner future producing the raw result
     * @param start     operation start time (epoch millis) used for end-to-end latency
     * @param opMonitor monitor that receives the recorded latency
     */
    public FutureOperationalResultImpl(String opName, Future<R> rFuture, long start, OperationMonitor opMonitor) {
        this.future = rFuture;
        this.opResult = new OperationResultImpl<R>(opName, rFuture, opMonitor).attempts(1);
        this.startTime = start;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        return future.cancel(mayInterruptIfRunning);
    }

    @Override
    public boolean isCancelled() {
        return future.isCancelled();
    }

    @Override
    public boolean isDone() {
        return future.isDone();
    }

    /** Waits for the inner future, recording end-to-end latency exactly once. */
    @Override
    public OperationResult<R> get() throws InterruptedException, ExecutionException {
        try {
            future.get();
            return opResult;
        } finally {
            recordTimeIfNeeded();
        }
    }

    // Records the latency only once, even under concurrent get() calls; the CAS is
    // the authoritative guard, the initial get() is just a cheap fast path.
    private void recordTimeIfNeeded() {
        if (timeRecorded.get()) {
            return;
        }
        if (timeRecorded.compareAndSet(false, true)) {
            opResult.setLatency(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
        }
    }

    /** Timed variant of {@link #get()}; latency is still recorded exactly once. */
    @Override
    public OperationResult<R> get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        try {
            future.get(timeout, unit);
            return opResult;
        } finally {
            recordTimeIfNeeded();
        }
    }

    /** Associates the destination host with the result; returns this for chaining. */
    public FutureOperationalResultImpl<R> node(Host node) {
        opResult.setNode(node);
        return this;
    }

    public OperationResultImpl<R> getOpResult() {
        return opResult;
    }

    /**
     * Not supported by this implementation.
     * Fixed: throws UnsupportedOperationException (still a RuntimeException, so
     * existing callers that catch RuntimeException keep working) instead of a
     * bare RuntimeException.
     */
    @Override
    public void addListener(Runnable listener, Executor executor) {
        throw new UnsupportedOperationException("Not Implemented");
    }
}
| 6,045 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/hash/DynoBinarySearch.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.hash;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import com.netflix.dyno.connectionpool.HashPartitioner;
/**
* Utility class that maps a given token hashed from a key using a {@link HashPartitioner}
* to a dynomite server on the dynomite topology ring.
* <p>
* Note that as long as the Token T implements the comparable interface this class can be used
* to perform the bin search on other homogeneous lists as well.
* <p>
* Here are the imp details of the mapping algorithm
* 1. If a hashed token directly maps to a point on that ring, then that point is chosen.
* 2. If a hashed token maps between 2 points A and B where A > B then B is chosen as the owner of the token
* 3. All hashed tokens that go past the last point on the ring are mapped to the first point on the ring.
* <p>
* e.g
* <p>
* Consider the following points on the ring.
* <p>
* 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
* <p>
* Elements 0 .. 9 --> 10
* 10 --> 10
* 15 --> 20
* 30 --> 30
* 58 --> 60
* 100 --> 100
* 100 + --> 10
*
* @param <T>
* @author poberai
*/
/**
 * Maps a hashed token to its owning point on the dynomite topology ring.
 * <p>
 * Mapping rules:
 * 1. A token equal to a ring point is owned by that point.
 * 2. A token between points A and B (A &lt; token &lt;= B) is owned by B.
 * 3. A token past the last point wraps around to the first point.
 * <p>
 * Works for any Comparable token type.
 *
 * @param <T> token type
 */
public class DynoBinarySearch<T extends Comparable<T>> {

    // Sorted, contiguous ranges covering the ring; each range knows its owning token.
    private final List<DynoTokenRange<T>> rangeList = new ArrayList<DynoTokenRange<T>>();

    private final AtomicBoolean listEmpty = new AtomicBoolean(false);

    public DynoBinarySearch(List<T> list) {
        if (list.isEmpty()) {
            listEmpty.set(true);
            return;
        }
        // First range is open-ended on the low side: everything up to list[0] maps to list[0].
        rangeList.add(new DynoTokenRange<T>(null, list.get(0)));
        // Each later range covers (list[i-1], list[i]].
        for (int i = 1; i < list.size(); i++) {
            rangeList.add(new DynoTokenRange<T>(list.get(i - 1), list.get(i)));
        }
        if (rangeList.size() > 1) {
            rangeList.get(rangeList.size() - 1).isLastRange = true;
        }
    }

    /**
     * @return the ring point owning the given token, or null when the ring is empty
     */
    public T getTokenOwner(T token) {
        if (listEmpty.get()) {
            return null;
        }
        if (rangeList.size() == 1) {
            return rangeList.get(0).getTokenOwner();
        }
        DynoTokenRange<T> first = rangeList.get(0);
        DynoTokenRange<T> last = rangeList.get(rangeList.size() - 1);
        // Token at or below the first ring point: owned by the first point.
        if (first.compareTo(token) == 0) {
            return first.getTokenOwner();
        }
        // Token beyond the last ring point wraps around to the first point.
        if (last.compareTo(token) < 0) {
            return first.getTokenOwner();
        }
        int pos = Collections.binarySearch(rangeList, token);
        if (pos < 0) {
            throw new RuntimeException("Token not found!: " + token);
        }
        return rangeList.get(pos).getTokenOwner();
    }

    public String toString() {
        StringBuilder sb = new StringBuilder("[DynoBinarySearch:\n");
        for (DynoTokenRange<T> range : rangeList) {
            sb.append(range).append("\n");
        }
        return sb.append("]").toString();
    }

    /**
     * A half-open range (start, end] on the ring; the end point owns every token
     * inside the range. A null start marks the open-ended first range.
     */
    static class DynoTokenRange<T extends Comparable<T>> implements Comparable<T> {

        final T start;
        final T end;
        boolean isFirstRange = false;
        // Set by the enclosing class on the final range; kept for compatibility.
        boolean isLastRange = false;

        DynoTokenRange(T s, T e) {
            start = s;
            end = e;
            isFirstRange = (s == null);
            if (isFirstRange) {
                if (end == null) {
                    throw new RuntimeException("Bad Range: end must not be null");
                }
            } else if (!lessThan(start, end)) {
                throw new RuntimeException("Bad Range: start must be less than end: " + this.toString());
            }
        }

        public T getTokenOwner() {
            return end;
        }

        public String toString() {
            return (isFirstRange ? "(null," : "(" + start + ",") + end + "]";
        }

        /**
         * Compares this range against a key: 0 when the key falls inside the range,
         * 1 when the range lies above the key, -1 when it lies below.
         */
        @Override
        public int compareTo(T key) {
            if (isFirstRange) {
                // Open-ended low side: anything at or below 'end' belongs here.
                return lessThanEquals(key, end) ? 0 : -1;
            }
            if (lessThanEquals(key, start)) {
                return 1;
            }
            if (lessThan(start, key) && lessThanEquals(key, end)) {
                return 0;
            }
            if (lessThan(end, key)) {
                return -1;
            }
            throw new RuntimeException("Invalid key for bin search: " + key + ", this range: " + this.toString());
        }

        private boolean lessThan(T left, T right) {
            return left.compareTo(right) < 0;
        }

        private boolean lessThanEquals(T left, T right) {
            return left.compareTo(right) <= 0;
        }
    }
}
| 6,046 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/hash/UnsignedIntsUtils.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.hash;
/** Helpers for treating a signed Java int as an unsigned 32-bit value. */
public class UnsignedIntsUtils {

    // Mask selecting the low 32 bits of a long.
    static final long INT_MASK = 0xffffffffL;

    // Static utility holder; never instantiated.
    private UnsignedIntsUtils() {
    }

    /**
     * Returns the value of the given {@code int} as a {@code long}, when treated as unsigned.
     */
    public static long toLong(int value) {
        return Integer.toUnsignedLong(value);
    }
}
| 6,047 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/hash/Murmur3Hash.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.hash;
/**
 * MurmurHash3, x86 32-bit variant. Non-cryptographic hash; the bit-level mixing
 * below follows the reference algorithm and must not be reordered.
 */
public class Murmur3Hash {

    /** Returns the MurmurHash3_x86_32 hash. */
    public static int hash32(byte[] data, int length) {
        return murmurhash3x8632(data, 0, length, 0);
    }

    /**
     * Core MurmurHash3 x86_32 routine.
     *
     * @param data   input bytes
     * @param offset index of the first byte to hash
     * @param len    number of bytes to hash
     * @param seed   initial hash state
     * @return 32-bit hash value
     */
    public static int murmurhash3x8632(byte[] data, int offset, int len, int seed) {
        // Mixing constants from the reference implementation.
        int c1 = 0xcc9e2d51;
        int c2 = 0x1b873593;
        int h1 = seed;
        int roundedEnd = offset + (len & 0xfffffffc); // round down to 4 byte block
        // Body: consume full 4-byte blocks.
        for (int i = offset; i < roundedEnd; i += 4) {
            // little endian load order
            int k1 = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8) | ((data[i + 2] & 0xff) << 16) | (data[i + 3] << 24);
            k1 *= c1;
            k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
            k1 *= c2;
            h1 ^= k1;
            h1 = (h1 << 13) | (h1 >>> 19); // ROTL32(h1,13);
            h1 = h1 * 5 + 0xe6546b64;
        }
        // tail: remaining 1-3 bytes; the cases intentionally fall through.
        int k1 = 0;
        switch (len & 0x03) {
            case 3:
                k1 = (data[roundedEnd + 2] & 0xff) << 16;
                // fallthrough
            case 2:
                k1 |= (data[roundedEnd + 1] & 0xff) << 8;
                // fallthrough
            case 1:
                k1 |= data[roundedEnd] & 0xff;
                k1 *= c1;
                k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
                k1 *= c2;
                h1 ^= k1;
            default:
        }
        // finalization
        h1 ^= len;
        // fmix(h1): final avalanche of the accumulated bits.
        h1 ^= h1 >>> 16;
        h1 *= 0x85ebca6b;
        h1 ^= h1 >>> 13;
        h1 *= 0xc2b2ae35;
        h1 ^= h1 >>> 16;
        return h1;
    }
}
| 6,048 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/hash/Murmur2Hash.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.hash;
/**
* murmur hash 2.0.
*
* The murmur hash is a relatively fast hash function from
* http://murmurhash.googlepages.com/ for platforms with efficient
* multiplication.
*
* This is a re-implementation of the original C code plus some
* additional features.
*
* Public domain.
*
* @author Viliam Holub
* @version 1.0.2
*
*/
public final class Murmur2Hash {

    // All methods are static; the private constructor prevents instantiation.
    private Murmur2Hash() {
    }

    /**
     * Generates a 32 bit hash from a byte array of the given length and seed.
     *
     * @param data byte array to hash
     * @param length length of the array to hash
     * @param seed initial seed value
     * @return 32 bit hash of the given array
     */
    public static int hash32(final byte[] data, int length, int seed) {
        // 'm' and 'r' are mixing constants generated offline.
        // They're not really 'magic', they just happen to work well.
        final int m = 0x5bd1e995;
        final int r = 24;

        // Initialize the hash to a 'random' value derived from seed and length.
        int h = seed ^ length;

        // Mix 4 bytes at a time into the hash (little-endian load order).
        int length4 = length / 4;
        for (int i = 0; i < length4; i++) {
            final int i4 = i * 4;
            int k = (data[i4 + 0] & 0xff) + ((data[i4 + 1] & 0xff) << 8)
                    + ((data[i4 + 2] & 0xff) << 16) + ((data[i4 + 3] & 0xff) << 24);
            k *= m;
            k ^= k >>> r;
            k *= m;
            h *= m;
            h ^= k;
        }

        // Handle the last few (1-3) bytes of the input array.
        switch (length % 4) {
            case 3:
                h ^= (data[(length & ~3) + 2] & 0xff) << 16;
                // fall through
            case 2:
                h ^= (data[(length & ~3) + 1] & 0xff) << 8;
                // fall through
            case 1:
                h ^= (data[length & ~3] & 0xff);
                h *= m;
        }

        // Final avalanche: force all bits of the hash to affect each other.
        h ^= h >>> 13;
        h *= m;
        h ^= h >>> 15;

        return h;
    }

    /**
     * Generates 32 bit hash from byte array with the default seed value.
     *
     * @param data byte array to hash
     * @param length length of the array to hash
     * @return 32 bit hash of the given array
     */
    public static int hash32(final byte[] data, int length) {
        return hash32(data, length, 0x9747b28c);
    }

    /**
     * Generates 32 bit hash from a string.
     *
     * @param text string to hash
     * @return 32 bit hash of the given string
     */
    public static int hash32(final String text) {
        // Encode with an explicit charset so the hash is identical on every
        // platform; a bare String.getBytes() depends on the JVM default charset.
        // UTF-8 matches the encoding the partitioner classes in this package use.
        final byte[] bytes = text.getBytes(java.nio.charset.StandardCharsets.UTF_8);
        return hash32(bytes, bytes.length);
    }

    /**
     * Generates 32 bit hash from a substring.
     *
     * @param text string to hash
     * @param from starting index
     * @param length length of the substring to hash
     * @return 32 bit hash of the given string
     */
    public static int hash32(final String text, int from, int length) {
        return hash32(text.substring(from, from + length));
    }

    /**
     * Generates a 64 bit hash from byte array of the given length and seed.
     *
     * @param data byte array to hash
     * @param length length of the array to hash
     * @param seed initial seed value
     * @return 64 bit hash of the given array
     */
    public static long hash64(final byte[] data, int length, int seed) {
        final long m = 0xc6a4a7935bd1e995L;
        final int r = 47;

        // Seed is treated as an unsigned 32-bit value.
        long h = (seed & 0xffffffffL) ^ (length * m);

        // Mix 8 bytes at a time into the hash (little-endian load order).
        int length8 = length / 8;
        for (int i = 0; i < length8; i++) {
            final int i8 = i * 8;
            long k = ((long) data[i8 + 0] & 0xff) + (((long) data[i8 + 1] & 0xff) << 8)
                    + (((long) data[i8 + 2] & 0xff) << 16) + (((long) data[i8 + 3] & 0xff) << 24)
                    + (((long) data[i8 + 4] & 0xff) << 32) + (((long) data[i8 + 5] & 0xff) << 40)
                    + (((long) data[i8 + 6] & 0xff) << 48) + (((long) data[i8 + 7] & 0xff) << 56);
            k *= m;
            k ^= k >>> r;
            k *= m;
            h ^= k;
            h *= m;
        }

        // Handle the last few (1-7) bytes of the input array.
        switch (length % 8) {
            case 7:
                h ^= (long) (data[(length & ~7) + 6] & 0xff) << 48;
                // fall through
            case 6:
                h ^= (long) (data[(length & ~7) + 5] & 0xff) << 40;
                // fall through
            case 5:
                h ^= (long) (data[(length & ~7) + 4] & 0xff) << 32;
                // fall through
            case 4:
                h ^= (long) (data[(length & ~7) + 3] & 0xff) << 24;
                // fall through
            case 3:
                h ^= (long) (data[(length & ~7) + 2] & 0xff) << 16;
                // fall through
            case 2:
                h ^= (long) (data[(length & ~7) + 1] & 0xff) << 8;
                // fall through
            case 1:
                h ^= (long) (data[length & ~7] & 0xff);
                h *= m;
        }

        // Final avalanche.
        h ^= h >>> r;
        h *= m;
        h ^= h >>> r;

        return h;
    }

    /**
     * Generates 64 bit hash from byte array with the default seed value.
     *
     * @param data byte array to hash
     * @param length length of the array to hash
     * @return 64 bit hash of the given array
     */
    public static long hash64(final byte[] data, int length) {
        return hash64(data, length, 0xe17a1465);
    }

    /**
     * Generates 64 bit hash from a string.
     *
     * @param text string to hash
     * @return 64 bit hash of the given string
     */
    public static long hash64(final String text) {
        // Explicit UTF-8 for platform-independent hashing (see hash32(String)).
        final byte[] bytes = text.getBytes(java.nio.charset.StandardCharsets.UTF_8);
        return hash64(bytes, bytes.length);
    }

    /**
     * Generates 64 bit hash from a substring.
     *
     * @param text string to hash
     * @param from starting index
     * @param length length of the substring to hash
     * @return 64 bit hash of the given substring
     */
    public static long hash64(final String text, int from, int length) {
        return hash64(text.substring(from, from + length));
    }
}
/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.hash;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
/**
* Impl of {@link HashPartitioner} that uses {@link Murmur1Hash}
* @author poberai
*
*/
public class Murmur1HashPartitioner implements HashPartitioner {
private static final String UTF_8 = "UTF-8";
private static final Charset charset = Charset.forName(UTF_8);
@Override
public Long hash(String key) {
if (key == null) {
return 0L;
}
ByteBuffer bb = ByteBuffer.wrap(key.getBytes(charset));
byte[] b = bb.array();
return UnsignedIntsUtils.toLong(Murmur1Hash.hash(b, b.length));
}
@Override
public Long hash(byte[] key) {
if (key == null) {
return 0L;
}
return UnsignedIntsUtils.toLong(Murmur1Hash.hash(key, key.length));
}
@Override
public Long hash(long key) {
ByteBuffer bb = ByteBuffer.allocate(8).putLong(0, key);
bb.rewind();
byte[] b = bb.array();
return UnsignedIntsUtils.toLong(Murmur1Hash.hash(b, b.length));
}
@Override
public Long hash(int key) {
ByteBuffer bb = ByteBuffer.allocate(4);
bb.putInt(key);
bb.rewind();
byte[] b = bb.array();
return UnsignedIntsUtils.toLong(Murmur1Hash.hash(b, b.length));
}
@Override
public HostToken getToken(Long keyHash) {
throw new RuntimeException("NotImplemented");
}
} | 6,050 |
/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.hash;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
/**
* Impl of {@link HashPartitioner} that uses {@link Murmur3Hash}
* @author poberai
*
*/
/**
 * {@link HashPartitioner} backed by the 32-bit {@link Murmur3Hash} function.
 * Every supported key type is first serialized to bytes (strings as UTF-8,
 * numbers big-endian) before hashing, and the unsigned 32-bit result is
 * widened to a Long.
 *
 * @author poberai
 */
public class Murmur3HashPartitioner implements HashPartitioner {

    private static final String UTF_8 = "UTF-8";
    private static final Charset charset = Charset.forName(UTF_8);

    public Murmur3HashPartitioner() {
    }

    @Override
    public Long hash(long key) {
        // serialize the long big-endian into 8 bytes, then hash those bytes
        byte[] raw = ByteBuffer.allocate(8).putLong(0, key).array();
        return toUnsignedHash(raw);
    }

    @Override
    public Long hash(int key) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.putInt(key);
        buf.rewind();
        return toUnsignedHash(buf.array());
    }

    @Override
    public Long hash(String key) {
        // null keys all map to token 0 rather than failing
        return (key == null) ? 0L : toUnsignedHash(key.getBytes(charset));
    }

    @Override
    public Long hash(byte[] key) {
        return (key == null) ? 0L : toUnsignedHash(key);
    }

    /** Hashes the bytes with Murmur3 and widens the unsigned 32-bit result to a Long. */
    private static Long toUnsignedHash(byte[] bytes) {
        return UnsignedIntsUtils.toLong(Murmur3Hash.hash32(bytes, bytes.length));
    }

    /** Token lookup is not supported by this partitioner. */
    @Override
    public HostToken getToken(Long keyHash) {
        throw new RuntimeException("NotImplemented");
    }
}

/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.hash;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
/**
* Impl of {@link HashPartitioner} that uses {@link Murmur2Hash}
* @author poberai
*
*/
/**
 * {@link HashPartitioner} implementation that delegates to {@link Murmur2Hash}.
 * Keys are converted to a byte representation (strings as UTF-8, numbers
 * big-endian) and the unsigned 32-bit hash is returned as a Long.
 *
 * @author poberai
 */
public class Murmur2HashPartitioner implements HashPartitioner {

    private static final String UTF_8 = "UTF-8";
    private static final Charset charset = Charset.forName(UTF_8);

    public Murmur2HashPartitioner() {
    }

    @Override
    public Long hash(long key) {
        // 8-byte big-endian encoding of the long
        final byte[] encoded = ByteBuffer.allocate(8).putLong(0, key).array();
        return UnsignedIntsUtils.toLong(Murmur2Hash.hash32(encoded, encoded.length));
    }

    @Override
    public Long hash(int key) {
        final ByteBuffer buffer = ByteBuffer.allocate(4);
        buffer.putInt(key);
        buffer.rewind();
        final byte[] encoded = buffer.array();
        return UnsignedIntsUtils.toLong(Murmur2Hash.hash32(encoded, encoded.length));
    }

    @Override
    public Long hash(String key) {
        if (key == null) {
            // null keys all hash to token 0
            return 0L;
        }
        final byte[] encoded = key.getBytes(charset);
        return UnsignedIntsUtils.toLong(Murmur2Hash.hash32(encoded, encoded.length));
    }

    @Override
    public Long hash(byte[] key) {
        if (key == null) {
            return 0L;
        }
        return UnsignedIntsUtils.toLong(Murmur2Hash.hash32(key, key.length));
    }

    /** Token lookup is not supported by this partitioner. */
    @Override
    public HostToken getToken(Long keyHash) {
        throw new RuntimeException("NotImplemented");
    }
}

/*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.hash;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
/**
* Impl of {@link HashPartitioner} that can be used to keys to the dynomite topology ring using the binary search mechanism.
* Note that the class only performs the function of binary search to locate a hash token on the dynomite topology ring.
* The hash token to be generated from the key is generated using the HashPartitioner provided to this class.
*
* @author poberai
* @author ipapapa
*/
public class BinarySearchTokenMapper implements HashPartitioner {

    /** Delegate that actually computes the hash of a key. */
    private final HashPartitioner partitioner;

    /** Sorted-token search structure; rebuilt atomically whenever the token set changes. */
    private final AtomicReference<DynoBinarySearch<Long>> binarySearch = new AtomicReference<DynoBinarySearch<Long>>(null);

    /** Token value -> owning HostToken for every token currently on the ring. */
    private final ConcurrentHashMap<Long, HostToken> tokenMap = new ConcurrentHashMap<Long, HostToken>();

    public BinarySearchTokenMapper(HashPartitioner p) {
        this.partitioner = p;
    }

    @Override
    public Long hash(int key) {
        return partitioner.hash(key);
    }

    @Override
    public Long hash(long key) {
        return partitioner.hash(key);
    }

    @Override
    public Long hash(String key) {
        return partitioner.hash(key);
    }

    @Override
    public Long hash(byte[] key) {
        return partitioner.hash(key);
    }

    /**
     * Locates the host token that owns the given key hash on the ring.
     *
     * @throws NoAvailableHostsException if the ring has not been initialized
     *         or no token owns the given hash
     */
    @Override
    public HostToken getToken(Long keyHash) {
        DynoBinarySearch<Long> searcher = binarySearch.get();
        if (searcher == null) {
            // guard against an NPE when getToken() is called before initSearchMechanism()
            throw new NoAvailableHostsException("Token ring not initialized, cannot map key hash: " + keyHash);
        }
        Long token = searcher.getTokenOwner(keyHash);
        if (token == null) {
            throw new NoAvailableHostsException("Token not found for key hash: " + keyHash);
        }
        return tokenMap.get(token);
    }

    /** Seeds the token map with the given tokens and builds the search structure. */
    public void initSearchMechanism(Collection<HostToken> hostTokens) {
        for (HostToken hostToken : hostTokens) {
            tokenMap.put(hostToken.getToken(), hostToken);
        }
        initBinarySearch();
    }

    /** Adds a token to the ring; rebuilds the search structure only if it was new. */
    public void addHostToken(HostToken hostToken) {
        HostToken prevToken = tokenMap.putIfAbsent(hostToken.getToken(), hostToken);
        if (prevToken == null) {
            initBinarySearch();
        }
    }

    /** Removes a token from the ring; rebuilds the search structure only if it was present. */
    public void removeHostToken(HostToken hostToken) {
        HostToken prevToken = tokenMap.remove(hostToken.getToken());
        if (prevToken != null) {
            initBinarySearch();
        }
    }

    /**
     * @deprecated misspelled; use {@link #removeHostToken(HostToken)} instead.
     */
    @Deprecated
    public void remoteHostToken(HostToken hostToken) {
        removeHostToken(hostToken);
    }

    /** Removes the (single) token owned by the given host, if any. */
    public void removeHost(Host host) {
        HostToken theToken = null;
        for (HostToken token : tokenMap.values()) {
            if (token.getHost().equals(host)) {
                theToken = token;
                break;
            }
        }
        if (theToken != null) {
            removeHostToken(theToken);
        }
    }

    /** Rebuilds the binary-search structure from a freshly sorted snapshot of the tokens. */
    private void initBinarySearch() {
        List<Long> tokens = new ArrayList<Long>(tokenMap.keySet());
        Collections.sort(tokens);
        binarySearch.set(new DynoBinarySearch<Long>(tokens));
    }

    public boolean isEmpty() {
        return this.tokenMap.isEmpty();
    }

    @Override
    public String toString() {
        return binarySearch.toString();
    }
}

/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.hash;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
 * MurmurHash 1.0: fast, non-cryptographic 32- and 64-bit hash functions.
 * All methods are static and side-effect free apart from consuming the
 * supplied ByteBuffer (see the per-method notes).
 */
public class Murmur1Hash {
    /**
     * Hashes bytes in part of an array.
     * @param data The data to hash.
     * @param offset Where to start munging.
     * @param length How many bytes to process.
     * @param seed The seed to start with.
     * @return The 32-bit hash of the data in question.
     */
    public static int hash(byte[] data, int offset, int length, int seed) {
        return hash(ByteBuffer.wrap(data, offset, length), seed);
    }
    /**
     * Hashes the first {@code length} bytes of the array, using a seed
     * derived from the length itself.
     * @param data The data to hash.
     * @param length How many bytes to process.
     * @return The 32-bit hash of the data in question.
     */
    public static int hash(byte[] data, int length) {
        int seed = (0xdeadbeef * length);
        return hash(ByteBuffer.wrap(data, 0, length), seed);
    }
    /**
     * Hashes the bytes in a buffer from the current position to the limit.
     * NOTE: consumes the buffer (its position advances to the limit), but the
     * buffer's original byte order is restored before returning.
     * @param buf The bytes to hash.
     * @param seed The seed for the hash.
     * @return The 32 bit murmur hash of the bytes in the buffer.
     */
    public static int hash(ByteBuffer buf, int seed) {
        // save byte order for later restoration
        ByteOrder byteOrder = buf.order();
        buf.order(ByteOrder.LITTLE_ENDIAN);
        // 'm' and 'r' are the standard Murmur mixing constants
        int m = 0x5bd1e995;
        int r = 24;
        int h = seed ^ buf.remaining();
        // mix one little-endian 32-bit word at a time
        while (buf.remaining() >= 4) {
            int k = buf.getInt();
            k *= m;
            k ^= k >>> r;
            k *= m;
            h *= m;
            h ^= k;
        }
        // tail: zero-pad the remaining 1-3 bytes to a full word and mix them in
        if (buf.remaining() > 0) {
            ByteBuffer finish = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
            // for big-endian version, use this first:
            // finish.position(4-buf.remaining());
            finish.put(buf).rewind();
            h ^= finish.getInt();
            h *= m;
        }
        // final avalanche so every input bit affects every output bit
        h ^= h >>> 13;
        h *= m;
        h ^= h >>> 15;
        buf.order(byteOrder);
        return h;
    }
    /** 64-bit Murmur ("hash64A" variant) over the whole array. */
    public static long hash64A(byte[] data, int seed) {
        return hash64A(ByteBuffer.wrap(data), seed);
    }
    /** 64-bit Murmur ("hash64A" variant) over a slice of the array. */
    public static long hash64A(byte[] data, int offset, int length, int seed) {
        return hash64A(ByteBuffer.wrap(data, offset, length), seed);
    }
    /**
     * 64-bit Murmur hash of the buffer contents from position to limit.
     * NOTE: consumes the buffer, but restores its original byte order.
     * @param buf The bytes to hash.
     * @param seed The seed for the hash.
     * @return The 64 bit murmur hash of the bytes in the buffer.
     */
    public static long hash64A(ByteBuffer buf, int seed) {
        // save byte order for later restoration
        ByteOrder byteOrder = buf.order();
        buf.order(ByteOrder.LITTLE_ENDIAN);
        long m = 0xc6a4a7935bd1e995L;
        int r = 47;
        long h = seed ^ (buf.remaining() * m);
        // mix one little-endian 64-bit word at a time
        while (buf.remaining() >= 8) {
            long k = buf.getLong();
            k *= m;
            k ^= k >>> r;
            k *= m;
            h ^= k;
            h *= m;
        }
        // tail: zero-pad the remaining 1-7 bytes to a full word and mix them in
        if (buf.remaining() > 0) {
            ByteBuffer finish = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
            // for big-endian version, do this first:
            // finish.position(8-buf.remaining());
            finish.put(buf).rewind();
            h ^= finish.getLong();
            h *= m;
        }
        // final avalanche
        h ^= h >>> r;
        h *= m;
        h ^= h >>> r;
        buf.order(byteOrder);
        return h;
    }
}

/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.health;
/**
 * Tracks error occurrences against a threshold so callers can decide when a
 * resource (e.g. a host connection pool) has become unhealthy.
 */
public interface ErrorMonitor {
    /**
     * Monitor errors
     * @param numErrors number of new errors to record against the monitor
     * @return true/false indicating whether the error are within the threshold.
     * True: Errors still ok. False: errors have crossed the threshold
     */
    boolean trackError(int numErrors);
    /** Factory for creating {@link ErrorMonitor} instances. */
    interface ErrorMonitorFactory {
        // Deprecated in favor of the overload that takes an explicit threshold.
        @Deprecated
        ErrorMonitor createErrorMonitor();
        /**
         * Creates an error monitor with the given threshold.
         * @param maxValue threshold value used by the monitor implementation
         */
        ErrorMonitor createErrorMonitor(int maxValue);
    }
}

/*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.health;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.dyno.connectionpool.exception.DynoException;
/**
* Class that tracks the rate at which events occur over a specified rolling time window (in seconds)
* This is useful for tracking error rates from {@link ErrorRateMonitor}
*
* @author poberai
*/
public class RateTracker {

    // CAS "lock": holds the BucketCreator of the most recent window-roll attempt.
    // Threads race via compareAndSet; the winner rolls the window forward and the
    // losers wait on the winner's FutureTask instead of rolling it themselves.
    private final AtomicReference<BucketCreator> bucketCreateLock = new AtomicReference<BucketCreator>(null);

    // Number of times a thread won the CAS race above (exposed for unit tests).
    private final AtomicInteger wonLock = new AtomicInteger(0);

    // Rolling window of per-second buckets; package-private so tests can inspect it.
    final RollingWindow rWindow;

    /**
     * @param numSeconds size of the rolling window in seconds (one bucket per second)
     */
    public RateTracker(int numSeconds) {
        int windowSize = numSeconds;
        rWindow = new RollingWindow(windowSize);
    }

    /** Records a single event against the bucket for the current second. */
    public void trackRate() {
        trackRate(1);
    }

    /**
     * Records {@code count} events against the bucket for the current second,
     * first rolling the window forward if the head bucket is stale.
     * Thread-safe: concurrent callers coordinate the roll via a CAS + FutureTask.
     */
    public void trackRate(int count) {
        long currentTime = System.currentTimeMillis() / 1000; // the current second
        // compare the current window
        int compare = rWindow.compareWindow(currentTime);
        if (compare == 0) {
            // it is the same window, increment the quota and check the rate for this second
            rWindow.trackRate(count);
        } else if (compare < 0) {
            // the current window that is tracked is in the past, so create the window for this second
            // it does not matter if some other thread beat us to setting the last bucket.
            BucketCreator expected = bucketCreateLock.get();
            BucketCreator newCreator = new BucketCreator(currentTime);
            boolean success = bucketCreateLock.compareAndSet(expected, newCreator);
            if (success) {
                wonLock.incrementAndGet();
                // winner runs the roll synchronously on this thread
                newCreator.futureBucket.run();
            } else {
                try {
                    // lost the race: wait briefly for the winning thread's roll to finish
                    bucketCreateLock.get().futureBucket.get(20, TimeUnit.MILLISECONDS);
                } catch (TimeoutException e) {
                    //return true;
                    // NOTE(review): printStackTrace on timeout is best-effort logging;
                    // the event is still counted below against whatever bucket is current.
                    e.printStackTrace();
                } catch (Exception e) {
                    throw new DynoException(e);
                }
            }
            rWindow.trackRate(count);
        } else {
            // it is the prev window, let the request through
            return;
        }
    }

    /** Returns up to {@code lookback} most recent buckets, newest first. */
    public List<Bucket> getBuckets(int lookback) {
        return rWindow.getBuckets(lookback);
    }

    /** Returns every bucket in the window, newest first. */
    public List<Bucket> getAllBuckets() {
        return rWindow.getAllBuckets();
    }

    // used for unit tests
    int getWonLockCount() {
        return wonLock.get();
    }

    /**
     * Fixed-size deque of per-second buckets, newest bucket at the head.
     * The window is pre-filled at construction and kept at a constant size by
     * evicting the oldest bucket whenever a new one is added.
     */
    class RollingWindow {

        private final int windowSize;
        private final LinkedBlockingDeque<Bucket> queue = new LinkedBlockingDeque<Bucket>();
        private final AtomicInteger bucketCreateCount = new AtomicInteger(0);

        private RollingWindow(int wSize) {
            windowSize = wSize;
            long currentTime = System.currentTimeMillis() / 1000;
            long startTime = currentTime - windowSize + 1;
            // pre-fill the window with one bucket per second, newest at the head
            for (long i = startTime; i <= currentTime; i++) {
                queue.addFirst(new Bucket(i));
            }
        }

        // Counts the event against the newest (head) bucket.
        private void trackRate(int count) {
            queue.peekFirst().track(count);
        }

        // used primarily for unit tests
        int getQueueSize() {
            return queue.size();
        }

        int getBucketCreateCount() {
            return bucketCreateCount.get();
        }

        // Returns up to 'lookback' buckets starting from the newest.
        private List<Bucket> getBuckets(int lookback) {
            List<Bucket> list = new ArrayList<Bucket>();
            int count = 0;
            Iterator<Bucket> iter = queue.iterator();
            while (iter.hasNext() && count < lookback) {
                list.add(iter.next());
                count++;
            }
            return list;
        }

        private List<Bucket> getAllBuckets() {
            List<Bucket> list = new ArrayList<Bucket>();
            Iterator<Bucket> iter = queue.iterator();
            while (iter.hasNext()) {
                list.add(iter.next());
            }
            return list;
        }

        // Compares the head bucket's second with the given timestamp:
        // 0 = same second, negative = head is stale, positive = timestamp is older.
        private int compareWindow(long currentTimestamp) {
            Long lastBucketTimestamp = queue.peekFirst().lastTimestamp.get();
            return lastBucketTimestamp.compareTo(currentTimestamp);
        }

        // Adds a fresh bucket at the head, evicting the oldest to keep size constant.
        private void addNewBucket(long timestamp) {
            bucketCreateCount.incrementAndGet();
            Bucket newBucket = new Bucket(timestamp);
            queue.removeLast();
            queue.addFirst(newBucket);
        }

        // Rolls the window forward one bucket per missed second up to 'timestamp'.
        private void syncToNewWindow(long timestamp) {
            long currentTimestamp = queue.peekFirst().lastTimestamp.get();
            if (currentTimestamp == timestamp) {
                return;
            }
            while (currentTimestamp < timestamp) {
                currentTimestamp++;
                addNewBucket(currentTimestamp);
            }
        }

        public Bucket firstBucket() {
            return queue.peekFirst();
        }
    }

    /**
     * A one-second counter: the second it covers plus the number of events
     * recorded in it. Equality and hashCode are based solely on the timestamp.
     */
    public static class Bucket {

        private final AtomicLong lastTimestamp = new AtomicLong(0L);
        private final AtomicInteger count = new AtomicInteger(0);

        public Bucket() {
            this(System.currentTimeMillis() / 1000); // the current second
        }

        private Bucket(long timestamp) {
            lastTimestamp.set(timestamp); // the current second
        }

        /** Adds {@code delta} to the bucket's count and returns the new total. */
        public int track(int delta) {
            return count.addAndGet(delta);
        }

        public int count() {
            return count.get();
        }

        public long timestamp() {
            return lastTimestamp.get();
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            // NOTE(review): truncates the long timestamp to its low 32 bits
            result = prime * result + new Long(lastTimestamp.get()).intValue();
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null) return false;
            if (getClass() != obj.getClass()) return false;
            Bucket other = (Bucket) obj;
            return this.lastTimestamp.get() == other.lastTimestamp.get();
        }

        public String toString() {
            return "" + this.count();
        }
    }

    /**
     * Pairs a target second with a FutureTask that rolls the window forward to
     * it. The random id makes every creator instance distinct, so the CAS in
     * trackRate() can tell concurrent attempts apart.
     */
    private class BucketCreator {

        private final String id = UUID.randomUUID().toString();
        private final long timestamp;
        private final FutureTask<Bucket> futureBucket;

        private BucketCreator(long time) {
            this.timestamp = time;
            this.futureBucket = new FutureTask<Bucket>(new Callable<Bucket>() {
                @Override
                public Bucket call() throws Exception {
                    // roll the shared window forward and return its new head bucket
                    rWindow.syncToNewWindow(timestamp);
                    return rWindow.firstBucket();
                }
            });
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime * result + ((id == null) ? 0 : id.hashCode());
            result = prime * result + (int) (timestamp ^ (timestamp >>> 32));
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null) return false;
            if (getClass() != obj.getClass()) return false;
            BucketCreator other = (BucketCreator) obj;
            boolean equals = true;
            equals &= (id != null) ? id.equals(other.id) : other.id == null;
            equals &= (timestamp == other.timestamp);
            return equals;
        }
    }
}

/*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.health;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.exception.PoolExhaustedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.dyno.connectionpool.Host.Status;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.FatalConnectionException;
import com.netflix.dyno.connectionpool.exception.TimeoutException;
/**
* This class tracks the error rates for any {@link HostConnectionPool} via the {@link ErrorRateMonitor}
* The error rates are recorded directly by the class but the error rates are checked asynchronously in another thread.
* Once the thread decides that the error rates have crossed a configured threshold, then the {@link HostConnectionPool} is recycled.
* i.e it is first marked as DOWN to prevent any new connections from being borrowed from it. Then the pool is reconnect()'d
* <p>
* Hence callers to {@link HostConnectionPool} should take it's isActive() state into account when using this class.
* i.e before borrowing a connection check for isActive(). If not active, then use a fallback pool else throw an ex to the caller.
* Resume executing operations against the pool only once the pool becomes active.
*
* @param <CL>
* @author poberai
*/
public class ConnectionPoolHealthTracker<CL> implements HealthTracker<CL> {
private static final Logger Logger = LoggerFactory.getLogger(ConnectionPoolHealthTracker.class);
private final ConnectionPoolConfiguration cpConfiguration;
private final ScheduledExecutorService threadPool;
private final AtomicBoolean stop = new AtomicBoolean(false);
private final ConcurrentHashMap<Host, ErrorMonitor> errorRates = new ConcurrentHashMap<Host, ErrorMonitor>();
private final ConcurrentHashMap<Host, HostConnectionPool<CL>> reconnectingPools = new ConcurrentHashMap<Host, HostConnectionPool<CL>>();
private final ConcurrentHashMap<Host, HostConnectionPool<CL>> pingingPools = new ConcurrentHashMap<Host, HostConnectionPool<CL>>();
private final AtomicBoolean startedPing = new AtomicBoolean(false);
private final Integer SleepMillis;
private final Integer PoolReconnectWaitMillis;
public ConnectionPoolHealthTracker(ConnectionPoolConfiguration config, ScheduledExecutorService thPool) {
this(config, thPool, config.getHealthTrackerDelayMillis(), config.getPoolReconnectWaitMillis());
}
public ConnectionPoolHealthTracker(ConnectionPoolConfiguration config, ScheduledExecutorService thPool, int sleepMillis, int poolReconnectWaitMillis) {
cpConfiguration = config;
threadPool = thPool;
SleepMillis = sleepMillis;
PoolReconnectWaitMillis = poolReconnectWaitMillis;
}
public void removeHost(Host host) {
HostConnectionPool<CL> destPool = reconnectingPools.get(host);
if (destPool != null) {
Logger.info("Health tracker marking host as down " + host);
destPool.getHost().setStatus(Status.Down);
}
}
public void start() {
threadPool.scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
if (stop.get() || Thread.currentThread().isInterrupted()) {
return;
}
Logger.debug("Running, pending pools size: " + reconnectingPools.size());
for (Host host : reconnectingPools.keySet()) {
if (!host.isUp()) {
Logger.info("Host: " + host + " is marked as down, evicting host from reconnection pool");
reconnectingPools.remove(host);
continue;
}
HostConnectionPool<CL> pool = reconnectingPools.get(host);
Logger.info("Checking for reconnecting pool for host: " + host + ", pool active? " + pool.isActive());
if (pool.isActive()) {
// Pool is already active. Move on
reconnectingPools.remove(host);
} else {
try {
Logger.info("Reconnecting pool : " + pool);
pool.markAsDown(null);
if (PoolReconnectWaitMillis > 0) {
Logger.debug("Sleeping to allow enough time to drain connections");
Thread.sleep(PoolReconnectWaitMillis);
}
pool.reconnect();
if (pool.isActive()) {
Logger.info("Host pool reactivated: " + host);
reconnectingPools.remove(host);
} else {
Logger.info("Could not re-activate pool, will try again later");
}
} catch (Exception e) {
// do nothing, will retry again once thread wakes up
Logger.warn("Failed to reconnect pool for host: " + host + " " + e.getMessage());
}
}
}
}
}, 1000, SleepMillis, TimeUnit.MILLISECONDS);
}
public void stop() {
stop.set(true);
}
/**
 * Inspects a connection error and, when warranted, queues the host pool for
 * reconnection:
 * <ul>
 *   <li>Timeouts are ignored — they may just reflect pool saturation, not a
 *       network problem.</li>
 *   <li>PoolExhaustedException triggers an immediate reconnect.</li>
 *   <li>FatalConnectionException is fed to the host's ErrorMonitor and only
 *       triggers a reconnect once the error rate crosses its threshold.</li>
 * </ul>
 */
@Override
public void trackConnectionError(HostConnectionPool<CL> hostPool, DynoException e) {
if (e != null && e instanceof TimeoutException) {
// don't track timeouts, since that may not be indicative of an actual n/w problem
// that may just be a slowdown due to pool saturation of larger payloads
return;
}
if (e != null && e instanceof PoolExhaustedException) {
String hostName = "Unknown";
if (hostPool.getHost() != null) {
hostName = hostPool.getHost().getHostAddress();
}
// FIX: the host name and exception message were previously passed in the
// wrong order for this format string's placeholders.
Logger.error(String.format("Attempting to reconnect pool to host %s due to PoolExhaustedException: %s",
hostName, e.getMessage()));
reconnectPool(hostPool);
return;
}
if (e != null && e instanceof FatalConnectionException) {
Host host = hostPool.getHost();
ErrorMonitor errorMonitor = errorRates.get(host);
if (errorMonitor == null) {
errorMonitor = cpConfiguration.getErrorMonitorFactory().createErrorMonitor(hostPool.size());
errorRates.putIfAbsent(host, errorMonitor);
// re-read so concurrent callers all share the monitor that won the putIfAbsent race
errorMonitor = errorRates.get(host);
}
boolean errorRateOk = errorMonitor.trackError(1);
if (!errorRateOk) {
Logger.error("FAIL: Attempting to reconnect pool due to exceptions =>" + e.getMessage());
reconnectPool(hostPool);
}
}
}
/**
 * Drains the given pool and queues it so the background sweep started by
 * {@link #start()} will attempt to reconnect it.
 */
public void reconnectPool(HostConnectionPool<CL> hostPool) {
final Host owner = hostPool.getHost();
Logger.error("Enqueueing host cp for recycling due to too many errors: " + hostPool);
hostPool.markAsDown(null);
reconnectingPools.put(owner, hostPool);
}
/**
 * Registers the pool for periodic pinging and lazily starts the single shared
 * ping task on first registration. Subsequent calls only add the pool.
 */
public void initializePingHealthchecksForPool(HostConnectionPool<CL> hostPool) {
pingingPools.putIfAbsent(hostPool.getHost(), hostPool);
if (startedPing.get()) {
return; // ping task already running
}
if (pingingPools.isEmpty()) {
return; // no pools to ping
}
if (!startedPing.compareAndSet(false, true)) {
return; // another thread won the race to start the task
}
threadPool.scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
for (HostConnectionPool<CL> pool : pingingPools.values()) {
pingHostPool(pool);
}
}
}, 1, cpConfiguration.getPingFrequencySeconds(), TimeUnit.SECONDS);
}
/**
 * Pings every connection in the pool; any DynoException raised by a ping is
 * routed through {@link #trackConnectionError} like a regular failure.
 */
private void pingHostPool(HostConnectionPool<CL> hostPool) {
for (Connection<CL> conn : hostPool.getAllConnections()) {
try {
conn.execPing();
} catch (DynoException pingFailure) {
trackConnectionError(hostPool, pingFailure);
}
}
}
// used for unit tests: exposes the live reconnect queue (not a defensive copy)
ConcurrentHashMap<Host, HostConnectionPool<CL>> getReconnectingPools() {
return reconnectingPools;
}
}
| 6,057 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/health/SimpleErrorMonitorImpl.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.health;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * {@link ErrorMonitor} that trips once a running error count reaches a fixed
 * threshold. On a trip the counter is atomically reset so that each threshold
 * crossing is reported (return value {@code false}) by exactly one caller.
 */
public class SimpleErrorMonitorImpl implements ErrorMonitor {
private final AtomicInteger errorCount = new AtomicInteger(0);
private final int threshold;
public SimpleErrorMonitorImpl(int numErrorThreshold) {
threshold = numErrorThreshold;
}
/**
 * Adds the given number of errors to the running count.
 *
 * @return {@code false} iff this caller observed the count crossing the
 *         threshold (and reset it); {@code true} otherwise.
 */
@Override
public boolean trackError(int numErrors) {
final int total = errorCount.addAndGet(numErrors);
if (total < threshold) {
return true; // errors still below threshold
}
// Exactly one thread wins the reset and reports the violation (false);
// losers treat it as already reported and answer true.
return !errorCount.compareAndSet(total, 0);
}
/** Factory producing {@link SimpleErrorMonitorImpl} instances. */
public static class SimpleErrorMonitorFactory implements ErrorMonitorFactory {
private int threshold;
/** Uses a default threshold of 10 errors. */
public SimpleErrorMonitorFactory() {
this(10);
}
public SimpleErrorMonitorFactory(int simpleErrorThreshold) {
this.threshold = simpleErrorThreshold;
}
@Override
public ErrorMonitor createErrorMonitor() {
return new SimpleErrorMonitorImpl(this.threshold);
}
@Override
public ErrorMonitor createErrorMonitor(int maxValue) {
return new SimpleErrorMonitorImpl(maxValue);
}
// TODO add setter and keep error threshold in sync with maxConnsPerHost OR switch to error rate monitor
}
}
| 6,058 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/health/ErrorRateMonitor.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.health;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.dyno.connectionpool.ErrorRateMonitorConfig;
import com.netflix.dyno.connectionpool.ErrorRateMonitorConfig.ErrorThreshold;
import com.netflix.dyno.connectionpool.impl.health.RateTracker.Bucket;
/**
 * Tracks error rates for {@link ConnectionPoolHealthTracker}. Raw error counts
 * are recorded in a {@link RateTracker} sliding window; at most once per check
 * interval the configured {@link ErrorRateMonitorConfig} policies are applied
 * to the window's buckets to decide whether the error threshold was violated.
 *
 * @author poberai
 */
public class ErrorRateMonitor {
private final List<ErrorCheckPolicy> policies = new ArrayList<ErrorCheckPolicy>();
// epoch seconds of the last time policies were evaluated
private final AtomicLong lastCheckTimestamp = new AtomicLong(0L);
// epoch seconds of the last observed violation; used to suppress repeat alerts
private final AtomicLong suppressCheckTimestamp = new AtomicLong(0L);
// CAS token so only one caller per check window runs the policy evaluation
private final AtomicReference<String> errorCheckLock = new AtomicReference<String>(null);
private final long errorCheckFrequencySeconds;
private final RateTracker rateTracker;
private final int suppressErrorWindow;
/**
 * @param windowSize     size (seconds) of the sliding window kept by RateTracker
 * @param checkFrequency minimum seconds between two policy evaluations
 * @param suppressWindow seconds after a violation during which checks are skipped
 */
public ErrorRateMonitor(int windowSize, int checkFrequency, int suppressWindow) {
this.rateTracker = new RateTracker(windowSize);
this.errorCheckFrequencySeconds = checkFrequency;
this.lastCheckTimestamp.set(System.currentTimeMillis() / 1000);
this.suppressErrorWindow = suppressWindow;
}
/** Builds the monitor from config, installing one SimpleErrorCheckPolicy per threshold. */
public ErrorRateMonitor(ErrorRateMonitorConfig config) {
this(config.getWindowSizeSeconds(), config.getCheckFrequencySeconds(), config.getCheckSuppressWindowSeconds());
for (ErrorThreshold threshold : config.getThresholds()) {
addPolicy(new SimpleErrorCheckPolicy(threshold));
}
}
public void addPolicy(ErrorCheckPolicy policy) {
policies.add(policy);
}
/**
 * Records {@code count} errors and, if a check is due, evaluates all policies.
 *
 * @return {@code false} iff this caller ran a check and observed a threshold
 *         violation; {@code true} in every other case (below threshold, check
 *         not yet due, suppressed, or another thread is checking).
 */
public boolean trackErrorRate(int count) {
long timestamp = System.currentTimeMillis() / 1000;
this.rateTracker.trackRate(count);
if ((timestamp - lastCheckTimestamp.get()) >= errorCheckFrequencySeconds) {
if ((timestamp - suppressCheckTimestamp.get()) <= suppressErrorWindow) {
// don't check error. This is to prevent repeatedly firing alerts
return true;
}
// Best-effort mutual exclusion: swap in a fresh UUID; only the winner checks.
String expected = errorCheckLock.get();
boolean casWon = errorCheckLock.compareAndSet(expected, UUID.randomUUID().toString());
if (casWon) {
// record that we checked
lastCheckTimestamp.set(timestamp);
boolean failure = false;
List<Bucket> buckets = rateTracker.getAllBuckets();
for (ErrorCheckPolicy policy : policies) {
failure = policy.checkErrorRate(buckets);
if (failure) {
break;
}
}
if (failure) {
// Set the timestamp to suppress subsequent alerts for the configured time period
suppressCheckTimestamp.set(timestamp);
}
return !failure;
}
}
return true;
}
/** Strategy for deciding whether a set of per-second buckets violates a threshold. */
public interface ErrorCheckPolicy {
public boolean checkErrorRate(List<Bucket> buckets);
}
/**
 * Policy that reports a violation when at least {@code bucketCoveragePercentage}
 * percent of the window's buckets each meet the per-bucket error threshold.
 */
public static class SimpleErrorCheckPolicy implements ErrorCheckPolicy {
private final int perBucketThreshold;
private final int windowSize;
private final int bucketCoveragePercentage;
public SimpleErrorCheckPolicy(int bucketThreshold, int numBuckets, int bucketCoverage) {
this.perBucketThreshold = bucketThreshold;
this.windowSize = numBuckets;
this.bucketCoveragePercentage = bucketCoverage;
}
public SimpleErrorCheckPolicy(ErrorThreshold threshold) {
this(threshold.getThresholdPerSecond(), threshold.getWindowSeconds(), threshold.getWindowCoveragePercentage());
}
@Override
public boolean checkErrorRate(List<Bucket> buckets) {
// integer math: minimum number of over-threshold buckets required to flag a violation
int minViolationBucketThreshold = (windowSize * bucketCoveragePercentage) / 100;
int numBucketsOverThreshold = 0;
for (Bucket b : buckets) {
if (b.count() >= perBucketThreshold) {
numBucketsOverThreshold++;
}
}
return numBucketsOverThreshold >= minViolationBucketThreshold;
}
}
// used for unit tests
RateTracker getRateTracker() {
return rateTracker;
}
}
| 6,059 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/AbstractTokenMapSupplier.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.lb;
import java.util.*;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.exception.TimeoutException;
import com.netflix.dyno.connectionpool.impl.utils.ConfigUtils;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.Host.Status;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils.Predicate;
/**
* An Example of the JSON payload that we get from dynomite-manager (this will eventually be changed so that the call
* is made directly to Dynomite)
* <pre>
* [
* {
* "dc": "eu-west-1",
* "hostname": "ec2-52-208-92-24.eu-west-1.compute.amazonaws.com",
* "ip": "52.208.92.24",
* "rack": "dyno_sandbox--euwest1c",
* "token": "1383429731",
* "zone": "eu-west-1c",
* "hashtag" : "{}"
* },
* {
* "dc": "us-east-1",
* "hostname": "ec2-52-90-147-135.compute-1.amazonaws.com",
* "ip": "52.90.147.135",
* "rack": "dyno_sandbox--useast1c",
* "token": "1383429731",
* "zone": "us-east-1c",
* "hashtag" : "{}"
* },
* {
* "dc": "us-east-1",
* "hostname": "ec2-52-23-207-227.compute-1.amazonaws.com",
* "ip": "52.23.207.227",
* "rack": "dyno_sandbox--useast1e",
* "token": "1383429731",
* "zone": "us-east-1e",
* "hashtag" : "{}"
* },
* {
* "dc": "eu-west-1",
* "hostname": "ec2-52-209-165-110.eu-west-1.compute.amazonaws.com",
* "ip": "52.209.165.110",
* "rack": "dyno_sandbox--euwest1a",
* "token": "1383429731",
* "zone": "eu-west-1a",
* "hashtag" : "{}"
* },
* {
* "dc": "eu-west-1",
* "hostname": "ec2-52-16-89-77.eu-west-1.compute.amazonaws.com",
* "ip": "52.16.89.77",
* "rack": "dyno_sandbox--euwest1b",
* "token": "1383429731",
* "zone": "eu-west-1b",
* "hashtag" : "{}"
*
* },
* {
* "dc": "us-east-1",
* "hostname": "ec2-54-208-235-30.compute-1.amazonaws.com",
* "ip": "54.208.235.30",
* "rack": "dyno_sandbox--useast1d",
* "token": "1383429731",
* "zone": "us-east-1d",
* "hashtag" : "{}"
*
* }
* ]
* </pre>
*
* @author poberai
* @author ipapapa
*/
public abstract class AbstractTokenMapSupplier implements TokenMapSupplier {
private static final Logger Logger = LoggerFactory.getLogger(AbstractTokenMapSupplier.class);
// rack this client runs in; used by isLocalZoneHost below
private final String localZone;
// datacenter this client runs in; hosts outside it are dropped when parsing topology
private final String localDatacenter;
// NOTE(review): only assigned, never read in this class — presumably consumed
// by subclasses; verify before removing
private int unsuppliedPort = -1;
public AbstractTokenMapSupplier(String localRack) {
this.localZone = localRack;
localDatacenter = ConfigUtils.getDataCenter();
}
public AbstractTokenMapSupplier(String localRack, int port) {
this.localZone = localRack;
localDatacenter = ConfigUtils.getDataCenter();
unsuppliedPort = port;
}
public AbstractTokenMapSupplier() {
localZone = ConfigUtils.getLocalZone();
localDatacenter = ConfigUtils.getDataCenter();
}
public AbstractTokenMapSupplier(int port) {
localZone = ConfigUtils.getLocalZone();
localDatacenter = ConfigUtils.getDataCenter();
unsuppliedPort = port;
}
/** Returns the raw topology JSON, obtained by querying any of the given active hosts. */
public abstract String getTopologyJsonPayload(Set<Host> activeHosts);
/** Returns the raw topology JSON as served by the given host. */
public abstract String getTopologyJsonPayload(String hostname);
/**
 * Queries each active host in turn for the ring topology, accumulating tokens
 * until every active host is accounted for (or all hosts have been tried).
 * Hosts that fail to answer are logged and skipped.
 */
@Override
public List<HostToken> getTokens(Set<Host> activeHosts) {
// Doing this since not all tokens are received from an individual call
// to a dynomite server
// hence trying them all
Set<HostToken> allTokens = new HashSet<HostToken>();
Set<Host> remainingHosts = new HashSet<>(activeHosts);
for (Host host : activeHosts) {
try {
List<HostToken> hostTokens = parseTokenListFromJson(getTopologyJsonPayload((host.getHostAddress())));
for (HostToken hToken : hostTokens) {
allTokens.add(hToken);
remainingHosts.remove(hToken.getHost());
}
if (remainingHosts.size() == 0) {
Logger.info("Received token information for " + allTokens.size() + " hosts. Not querying other hosts");
break;
}
} catch (Exception e) {
Logger.warn("Could not get json response for token topology [" + e.getMessage() + "]");
}
}
return new ArrayList<>(allTokens);
}
/**
 * Resolves the token for a single host, preferring the topology reported by
 * the active hosts and falling back to the host itself on timeout.
 *
 * @return the matching HostToken, or null if the host is absent from the payload
 */
@Override
public HostToken getTokenForHost(final Host host, final Set<Host> activeHosts) {
String jsonPayload;
if (activeHosts.size() == 0) {
jsonPayload = getTopologyJsonPayload(host.getHostAddress());
} else {
try {
jsonPayload = getTopologyJsonPayload(activeHosts);
} catch (TimeoutException ex) {
// Try using the host we just primed connections to. If that
// fails,
// let the exception bubble up to ConnectionPoolImpl which will
// remove
// the host from the host-mapping
jsonPayload = getTopologyJsonPayload(host.getHostAddress());
}
}
List<HostToken> hostTokens = parseTokenListFromJson(jsonPayload);
return CollectionUtils.find(hostTokens, new Predicate<HostToken>() {
@Override
public boolean apply(HostToken x) {
return x.getHost().compareTo(host) == 0;
}
});
}
// NOTE(review): currently unused — parseTokenListFromJson filters by
// datacenter only; confirm whether rack filtering was intended before removal
private boolean isLocalZoneHost(Host host) {
if (localZone == null || localZone.isEmpty()) {
Logger.warn("Local rack was not defined");
return true; // consider everything
}
return localZone.equalsIgnoreCase(host.getRack());
}
/** True when the host's datacenter matches ours (or no local DC is configured). */
private boolean isLocalDatacenterHost(Host host) {
if (localDatacenter == null || localDatacenter.isEmpty()) {
Logger.warn("Local Datacenter was not defined");
return true;
}
return localDatacenter.equalsIgnoreCase(host.getDatacenter());
}
// package-private for Test
/**
 * Parses the dynomite-manager topology JSON (see the class javadoc for the
 * payload shape) into HostTokens, keeping only hosts in the local datacenter.
 * Missing "port"/"secure_port" fields fall back to Host.DEFAULT_PORT and the
 * plain port respectively.
 *
 * @throws RuntimeException wrapping the ParseException on malformed JSON
 */
List<HostToken> parseTokenListFromJson(String json) {
List<HostToken> hostTokens = new ArrayList<HostToken>();
JSONParser parser = new JSONParser();
try {
JSONArray arr = (JSONArray) parser.parse(json);
Iterator<?> iter = arr.iterator();
while (iter.hasNext()) {
Object item = iter.next();
if (!(item instanceof JSONObject)) {
continue;
}
JSONObject jItem = (JSONObject) item;
Long token = Long.parseLong((String) jItem.get("token"));
String hostname = (String) jItem.get("hostname");
String ipAddress = (String) jItem.get("ip");
String zone = (String) jItem.get("zone");
String datacenter = (String) jItem.get("dc");
String portStr = (String) jItem.get("port");
String securePortStr = (String) jItem.get("secure_port");
String hashtag = (String) jItem.get("hashtag");
int port = Host.DEFAULT_PORT;
if (portStr != null) {
port = Integer.valueOf(portStr);
}
int securePort = port;
if (securePortStr != null) {
securePort = Integer.valueOf(securePortStr);
}
Host host = new HostBuilder().setHostname(hostname).setIpAddress(ipAddress).setPort(port).setSecurePort(securePort).setRack(zone).setDatacenter(datacenter).setStatus(Status.Up).setHashtag(hashtag).createHost();
if (isLocalDatacenterHost(host)) {
HostToken hostToken = new HostToken(token, host);
hostTokens.add(hostToken);
}
}
} catch (ParseException e) {
Logger.error("Failed to parse json response: " + json, e);
throw new RuntimeException(e);
}
return hostTokens;
}
}
| 6,060 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/HostToken.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.lb;
import com.netflix.dyno.connectionpool.Host;
/**
 * Pairs a {@link Host} with its token on the dynomite topology ring.
 * Implements {@code Comparable<Long>} so sorted collections of HostTokens can
 * be binary-searched by raw token value when locating the owner of a key hash.
 * Note: equality considers both token and host, while compareTo orders by
 * token alone.
 *
 * @author poberai
 */
public class HostToken implements Comparable<Long> {
private final Long token;
private final Host host;
public HostToken(Long token, Host host) {
this.token = token;
this.host = host;
}
public Long getToken() {
return token;
}
public Host getHost() {
return host;
}
@Override
public String toString() {
return "HostToken [token=" + token + ", host=" + host + "]";
}
@Override
public int hashCode() {
// same value as the conventional 31-based accumulation over (host, token)
int result = 31 + ((host == null) ? 0 : host.hashCode());
return 31 * result + ((token == null) ? 0 : token.hashCode());
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
HostToken that = (HostToken) obj;
boolean sameToken = (token == null) ? (that.token == null) : token.equals(that.token);
boolean sameHost = (host == null) ? (that.host == null) : host.equals(that.host);
return sameToken && sameHost;
}
@Override
public int compareTo(Long o) {
return this.token.compareTo(o);
}
public int compareTo(HostToken o) {
return this.token.compareTo(o.getToken());
}
}
| 6,061 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/HostSelectionWithFallback.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.lb;
import com.netflix.dyno.connectionpool.BaseOperation;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration.LoadBalancingStrategy;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.RetryPolicy;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.TokenPoolTopology;
import com.netflix.dyno.connectionpool.TokenRackMapper;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.exception.PoolExhaustedException;
import com.netflix.dyno.connectionpool.exception.PoolOfflineException;
import com.netflix.dyno.connectionpool.exception.PoolTimeoutException;
import com.netflix.dyno.connectionpool.impl.HostSelectionStrategy;
import com.netflix.dyno.connectionpool.impl.HostSelectionStrategy.HostSelectionStrategyFactory;
import com.netflix.dyno.connectionpool.impl.RunOnce;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils.Predicate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
/**
* Acts as a coordinator over multiple HostSelectionStrategy implementations, where each maps to a particular rack.
* This class doesn't actually implement the logic (e.g Round Robin or Token Aware) to borrow the connections. It
* relies on a local HostSelectionStrategy implementation and a collection of remote HostSelectionStrategy(s).
* It gives preference to the "local" HostSelectionStrategy but if the local pool is offline or hosts are down etc, then
* it falls back to the remote HostSelectionStrategy. Also it uses pure round robin for distributing load on the fall
* back HostSelectionStrategy implementations for even distribution of load on the remote racks in the event of an
* outage in the local rack.
* <p>
* Note that this class does not prefer any one remote HostSelectionStrategy over another.
*
* @param <CL>
* @author poberai
* @author jcacciatore
*/
public class HostSelectionWithFallback<CL> {
private static final Logger logger = LoggerFactory.getLogger(HostSelectionWithFallback.class);
// Only used in calculating replication factor
private final String localDataCenter;
// tracks the local zone
private final String localRack;
// The selector for the local zone
private final HostSelectionStrategy<CL> localSelector;
// Track selectors for each remote zone
private final ConcurrentHashMap<String, HostSelectionStrategy<CL>> remoteRackSelectors = new ConcurrentHashMap<>();
private final ConcurrentHashMap<Host, HostToken> hostTokens = new ConcurrentHashMap<>();
private final TokenMapSupplier tokenSupplier;
private final ConnectionPoolConfiguration cpConfig;
private final ConnectionPoolMonitor cpMonitor;
private final AtomicInteger replicationFactor = new AtomicInteger(-1);
// Represents the *initial* topology from the token supplier. This does not affect selection of a host connection
// pool for traffic. It only affects metrics such as failover/fallback
private final AtomicReference<TokenPoolTopology> topology = new AtomicReference<>(null);
// list of names of remote zones. Used for RoundRobin over remote zones when local zone host is down
private final CircularList<String> remoteRackNames = new CircularList<>(new ArrayList<>());
private final HostSelectionStrategyFactory<CL> selectorFactory;
/**
 * @param config  pool configuration; supplies the local rack/DC names, the
 *                token supplier and the selection-strategy settings
 * @param monitor sink for failover/failure metrics
 */
public HostSelectionWithFallback(ConnectionPoolConfiguration config, ConnectionPoolMonitor monitor) {
cpMonitor = monitor;
cpConfig = config;
localRack = cpConfig.getLocalRack();
localDataCenter = cpConfig.getLocalDataCenter();
tokenSupplier = cpConfig.getTokenSupplier();
selectorFactory = new DefaultSelectionFactory(cpConfig);
// the local selector is always created; remote selectors are built in initWithHosts()
localSelector = selectorFactory.vendPoolSelectionStrategy();
}
/** Borrows a connection for the operation using the pool's configured retry policy. */
public Connection<CL> getConnection(BaseOperation<CL, ?> op, int duration, TimeUnit unit) throws NoAvailableHostsException, PoolExhaustedException {
return getConnection(op, null, duration, unit, cpConfig.getRetryPolicyFactory().getRetryPolicy());
}
/** Borrows a connection for the operation using the caller-supplied retry policy. */
public Connection<CL> getConnectionUsingRetryPolicy(BaseOperation<CL, ?> op, int duration, TimeUnit unit, RetryPolicy retry) throws NoAvailableHostsException, PoolExhaustedException {
return getConnection(op, null, duration, unit, retry);
}
/**
 * Core borrow path. Exactly one of {@code op} / {@code token} selects the pool.
 * On the first attempt (or when the retry policy forbids cross-zone fallback)
 * the local rack is tried; on local failure or later attempts a remote-rack
 * fallback is attempted, subject to {@link #attemptFallback()}.
 *
 * @throws PoolOfflineException when no pool could be found and no earlier
 *         exception explains why
 */
private Connection<CL> getConnection(BaseOperation<CL, ?> op, Long token, int duration, TimeUnit unit, RetryPolicy retry)
throws NoAvailableHostsException, PoolExhaustedException, PoolTimeoutException, PoolOfflineException {
DynoConnectException lastEx = null;
HostConnectionPool<CL> hostPool = null;
if (retry.getAttemptCount() == 0 || (retry.getAttemptCount() > 0 && !retry.allowCrossZoneFallback())) {
// By default zone affinity is enabled; if the local rack is not known at startup it is disabled
if (cpConfig.localZoneAffinity()) {
hostPool = getHostPoolForOperationOrTokenInLocalZone(op, token);
} else {
hostPool = getFallbackHostPool(op, token);
}
}
if (hostPool != null) {
try {
// Note that if a PoolExhaustedException is thrown it is caught by the calling
// ConnectionPoolImpl#executeXXX() method
return hostPool.borrowConnection(duration, unit);
} catch (PoolTimeoutException pte) {
// remember the timeout and fall through to the fallback attempt below
lastEx = pte;
cpMonitor.incOperationFailure(null, pte);
}
}
if (attemptFallback()) {
// only count a failover when the local rack actually had tokens to serve
if (topology.get().getTokensForRack(localRack) != null) {
cpMonitor.incFailover(null, lastEx);
}
hostPool = getFallbackHostPool(op, token);
if (hostPool != null) {
return hostPool.borrowConnection(duration, unit);
}
}
if (lastEx == null) {
throw new PoolOfflineException(hostPool == null ? null : hostPool.getHost(), "host pool is offline and no Racks available for fallback");
} else {
throw lastEx;
}
}
// Should be called when a connection is required on that particular zone with no fall backs what so ever
/**
 * Borrows a connection for {@code token} from the selector owning {@code rack}
 * only — no cross-rack fallback is attempted on failure.
 *
 * @throws PoolOfflineException when no pool exists for the token on that rack
 */
private Connection<CL> getConnectionForTokenOnRackNoFallback(BaseOperation<CL, ?> op, Long token, String rack, int duration, TimeUnit unit, RetryPolicy retry)
throws NoAvailableHostsException, PoolExhaustedException, PoolTimeoutException, PoolOfflineException {
DynoConnectException lastEx = null;
// find the selector for that rack,
HostSelectionStrategy<CL> selector = findSelectorForRack(rack);
// get the host using that selector
HostConnectionPool<CL> hostPool = selector.getPoolForToken(token);
if (hostPool != null) {
try {
// Note that if a PoolExhaustedException is thrown it is caught by the calling
// ConnectionPoolImpl#executeXXX() method
return hostPool.borrowConnection(duration, unit);
} catch (PoolTimeoutException pte) {
lastEx = pte;
cpMonitor.incOperationFailure(null, pte);
}
}
if (lastEx == null) {
throw new PoolOfflineException(hostPool == null ? null : hostPool.getHost(), "host pool is offline and we are forcing no fallback");
} else {
throw lastEx;
}
}
/**
 * Resolves a pool in the local rack for the given operation (or raw token when
 * {@code op} is null) and returns it only if it is active. Returns null when
 * the local selector is empty, the pool is inactive, or no local host is
 * available (the NoAvailableHostsException is recorded on the monitor).
 */
private HostConnectionPool<CL> getHostPoolForOperationOrTokenInLocalZone(BaseOperation<CL, ?> op, Long token) {
try {
if (localSelector.isEmpty()) {
return null;
}
final HostConnectionPool<CL> candidate;
if (op != null) {
candidate = localSelector.getPoolForOperation(op, cpConfig.getHashtag());
} else {
candidate = localSelector.getPoolForToken(token);
}
return isConnectionPoolActive(candidate) ? candidate : null;
} catch (NoAvailableHostsException e) {
cpMonitor.incOperationFailure(null, e);
return null;
}
}
/**
 * True when a fallback borrow should be attempted: the failover budget must be
 * positive AND either remote racks exist (zone-affinity mode) or the local
 * selector is usable (non-affinity mode, where the "local" selector owns the
 * whole ring).
 */
private boolean attemptFallback() {
// FIX: '&&' binds tighter than '||', so the original expression allowed a
// fallback attempt with maxFailoverCount == 0 whenever zone affinity was
// disabled. Parenthesize so the failover budget guards both modes, as the
// original formatting indicated was intended.
return cpConfig.getMaxFailoverCount() > 0 &&
((cpConfig.localZoneAffinity() && remoteRackNames.getEntireList().size() > 0) ||
(!cpConfig.localZoneAffinity() && !localSelector.isEmpty()));
}
/**
 * Round-robins over the remote racks (up to maxFailoverCount attempts) looking
 * for an active pool serving the operation/token.
 *
 * @throws NoAvailableHostsException when no remote rack yields an active pool
 */
private HostConnectionPool<CL> getFallbackHostPool(BaseOperation<CL, ?> op, Long token) {
int numRemotes = remoteRackNames.getEntireList().size();
if (numRemotes == 0) {
throw new NoAvailableHostsException("Could not find any remote Racks for fallback");
}
// try at most one rack per attempt, capped by the configured failover budget
int numTries = Math.min(numRemotes, cpConfig.getMaxFailoverCount());
DynoException lastEx = null;
while ((numTries > 0)) {
numTries--;
String remoteRack = remoteRackNames.getNextElement();
HostSelectionStrategy<CL> remoteRackSelector = remoteRackSelectors.get(remoteRack);
try {
HostConnectionPool<CL> fallbackHostPool =
(op != null) ? remoteRackSelector.getPoolForOperation(op, cpConfig.getHashtag()) : remoteRackSelector.getPoolForToken(token);
if (isConnectionPoolActive(fallbackHostPool)) {
return fallbackHostPool;
}
} catch (NoAvailableHostsException e) {
cpMonitor.incOperationFailure(null, e);
lastEx = e;
}
}
if (lastEx != null) {
throw lastEx;
} else {
throw new NoAvailableHostsException("Local rack host offline and could not find any remote hosts for fallback connection");
}
}
/**
 * Borrows one connection per token of a single rack's ring (the local rack, or
 * a random one when no local rack is configured) — used by ring-wide iterator
 * operations such as scan. When the cursor already maps a token to a rack, the
 * same rack is reused with no fallback so iteration stays consistent.
 * On any failure, all previously borrowed connections are returned before the
 * exception is rethrown.
 */
public Collection<Connection<CL>> getConnectionsToRing(TokenRackMapper tokenRackMapper, int duration, TimeUnit unit) throws NoAvailableHostsException, PoolExhaustedException {
String targetRack = localRack;
if (targetRack == null) {
// get tokens for random rack
targetRack = topology.get().getRandomRack();
}
final Set<Long> tokens = topology.get().getTokenHostsForRack(targetRack).keySet();
DynoConnectException lastEx = null;
final List<Connection<CL>> connections = new ArrayList<>();
for (Long token : tokens) {
try {
// Cursor has a map of token to rack which indicates an affinity to a zone for that token.
// This is valid in case of an iterator based query like scan.
// Try to use that same rack if it is specified.
String rack = null;
if (tokenRackMapper != null)
rack = tokenRackMapper.getRackForToken(token);
if (rack != null) {
connections.add(getConnectionForTokenOnRackNoFallback(null, token, rack, duration, unit, new RunOnce()));
} else {
Connection<CL> c = getConnection(null, token, duration, unit, new RunOnce());
if (tokenRackMapper != null) {
tokenRackMapper.setRackForToken(token, c.getHost().getRack());
}
connections.add(c);
}
} catch (DynoConnectException e) {
// FIX: e.getMessage() was previously passed as an argument to a format
// string with no placeholder and silently dropped; pass the exception so
// SLF4J's warn(String, Throwable) logs it.
logger.warn("Failed to get connection when getting all connections from ring", e);
lastEx = e;
break;
}
}
if (lastEx != null) {
// Return all previously borrowed connection to avoid any connection leaks
for (Connection<CL> connection : connections) {
try {
connection.getParentConnectionPool().returnConnection(connection);
} catch (DynoConnectException e) {
// do nothing
}
}
throw lastEx;
} else {
return connections;
}
}
/**
 * Returns the selection strategy serving the given rack: the local selector
 * when the rack matches the local rack (or when no local rack is configured),
 * otherwise the remote selector — which may be null for an unknown rack.
 */
private HostSelectionStrategy<CL> findSelectorForRack(String rack) {
if (localRack == null || localRack.equals(rack)) {
return localSelector;
}
return remoteRackSelectors.get(rack);
}
/** A pool is usable only when non-null, its host is Up, and the pool itself is active. */
private boolean isConnectionPoolActive(HostConnectionPool<CL> hPool) {
return hPool != null && hPool.getHost().isUp() && hPool.isActive();
}
/**
 * Filters the token->pool map down to entries whose host lives in the given
 * rack. NOTE: when no local rack is configured every entry matches, so the
 * "local" selector ends up owning the entire ring.
 */
private Map<HostToken, HostConnectionPool<CL>> getHostPoolsForRack(final Map<HostToken, HostConnectionPool<CL>> map, final String rack) {
Map<HostToken, HostConnectionPool<CL>> dcPools =
CollectionUtils.filterKeys(map, new Predicate<HostToken>() {
@Override
public boolean apply(HostToken x) {
if (localRack == null) {
return true;
}
return rack.equals(x.getHost().getRack());
}
});
return dcPools;
}
/**
 * Initializes the selection topology from the host pools obtained from
 * discovery: resolves a token for every host, builds the local selector from
 * local-rack pools, one remote selector per remote rack, and records the
 * initial {@link TokenPoolTopology} (used for metrics, not routing).
 *
 * @param hPools host -> connection pool map from discovery
 */
public void initWithHosts(Map<Host, HostConnectionPool<CL>> hPools) {
// Get the list of tokens for these hosts
//tokenSupplier.initWithHosts(hPools.keySet());
List<HostToken> allHostTokens = tokenSupplier.getTokens(hPools.keySet());
Map<HostToken, HostConnectionPool<CL>> tokenPoolMap = new HashMap<HostToken, HostConnectionPool<CL>>();
// Update inner state with the host tokens.
for (HostToken hToken : allHostTokens) {
hostTokens.put(hToken.getHost(), hToken);
tokenPoolMap.put(hToken, hPools.get(hToken.getHost()));
}
// Initialize Local selector
Map<HostToken, HostConnectionPool<CL>> localPools = getHostPoolsForRack(tokenPoolMap, localRack);
localSelector.initWithHosts(localPools);
// replication factor only matters for token-aware selection
if (localSelector.isTokenAware()) {
replicationFactor.set(HostUtils.calculateReplicationFactorForDC(allHostTokens, cpConfig.getLocalDataCenter(), localRack));
}
// Initialize Remote selectors
Set<String> remoteRacks = hPools.keySet().stream().map(h -> h.getRack()).filter(rack -> rack != null && !rack.equals(localRack)).collect(Collectors.toSet());
for (String rack : remoteRacks) {
Map<HostToken, HostConnectionPool<CL>> dcPools = getHostPoolsForRack(tokenPoolMap, rack);
HostSelectionStrategy<CL> remoteSelector = selectorFactory.vendPoolSelectionStrategy();
remoteSelector.initWithHosts(dcPools);
remoteRackSelectors.put(rack, remoteSelector);
}
remoteRackNames.swapWithList(remoteRackSelectors.keySet());
topology.set(createTokenPoolTopology(allHostTokens));
}
/**
 * Calculate the replication factor from the given list of host tokens,
 * scoped to the local rack's datacenter (delegates to
 * {@link HostUtils#calculateReplicationFactorForDC} with a null datacenter,
 * which derives the DC from {@code localRack}).
 *
 * @param allHostTokens tokens for all known hosts
 * @return replicationFactor
 */
int calculateReplicationFactor(List<HostToken> allHostTokens) {
return HostUtils.calculateReplicationFactorForDC(allHostTokens, null, localRack);
}
/**
 * Register a new host: resolve its token, record it, hand its pool to the
 * selector for the host's rack, and reflect it in the current topology.
 *
 * @param host     host to add
 * @param hostPool connection pool backing the host
 * @throws DynoConnectException when no token can be resolved for the host
 */
public void addHost(Host host, HostConnectionPool<CL> hostPool) {
HostToken hostToken = tokenSupplier.getTokenForHost(host, hostTokens.keySet());
if (hostToken == null) {
throw new DynoConnectException("Could not find host token for host: " + host);
}
hostTokens.put(hostToken.getHost(), hostToken);
// A null selector means we have no strategy for this rack; the host is still
// tracked in hostTokens and the topology below.
HostSelectionStrategy<CL> selector = findSelectorForRack(host.getRack());
if (selector != null) {
selector.addHostPool(hostToken, hostPool);
}
topology.get().addHostToken(hostToken.getHost().getRack(), hostToken.getToken(), hostToken.getHost());
}
/**
 * Deregister a host: drop its token mapping, remove its pool from the rack's
 * selector, and remove it from the current topology. No-op for unknown hosts.
 *
 * @param host host to remove
 */
public void removeHost(Host host) {
HostToken hostToken = hostTokens.remove(host);
if (hostToken != null) {
HostSelectionStrategy<CL> selector = findSelectorForRack(host.getRack());
if (selector != null) {
selector.removeHostPool(hostToken);
}
topology.get().removeHost(hostToken.getHost().getRack(), hostToken.getToken(), hostToken.getHost());
}
}
/**
 * Default factory that vends a {@link HostSelectionStrategy} matching the
 * load-balancing strategy configured at construction time.
 */
private class DefaultSelectionFactory implements HostSelectionStrategyFactory<CL> {

    // Settings captured once from the configuration at construction time.
    private final LoadBalancingStrategy lbStrategy;
    private final HashPartitioner hashPartitioner;

    private DefaultSelectionFactory(ConnectionPoolConfiguration config) {
        lbStrategy = config.getLoadBalancingStrategy();
        hashPartitioner = config.getHashPartitioner();
    }

    /**
     * @return a fresh selection strategy for the configured policy
     * @throws RuntimeException when the configured strategy is unsupported
     */
    @Override
    public HostSelectionStrategy<CL> vendPoolSelectionStrategy() {
        switch (lbStrategy) {
            case RoundRobin:
                return new RoundRobinSelection<CL>();
            case TokenAware:
                // Honor a custom hash partitioner when one was configured.
                return hashPartitioner != null
                        ? new TokenAwareSelection<CL>(hashPartitioner)
                        : new TokenAwareSelection<CL>();
            default:
                // FIX: report the locally captured strategy instead of re-reading
                // cpConfig, so the message always reflects the value this factory
                // was actually built with.
                throw new RuntimeException("LoadBalancing strategy not supported! " + lbStrategy.name());
        }
    }
}
/**
 * Record pool tokens into the given topology: local selector first
 * (when a local rack is configured), then every remote-rack selector.
 */
private void updateTokenPoolTopology(TokenPoolTopology topology) {
    if (localRack != null) {
        addTokens(topology, localRack, localSelector);
    }
    for (Map.Entry<String, HostSelectionStrategy<CL>> entry : remoteRackSelectors.entrySet()) {
        addTokens(topology, entry.getKey(), entry.getValue());
    }
}
/**
 * Build a {@link TokenPoolTopology} snapshot from the supplied host tokens.
 *
 * @param allHostTokens tokens for every known host
 * @return topology populated with per-rack host and pool token information
 */
public TokenPoolTopology createTokenPoolTopology(List<HostToken> allHostTokens) {
    final TokenPoolTopology tokenPoolTopology = new TokenPoolTopology(replicationFactor.get());
    // First register every host under its rack ...
    for (HostToken ht : allHostTokens) {
        tokenPoolTopology.addHostToken(ht.getHost().getRack(), ht.getToken(), ht.getHost());
    }
    // ... then layer in the pool tokens tracked by the selectors.
    updateTokenPoolTopology(tokenPoolTopology);
    return tokenPoolTopology;
}
/**
 * Build and return a fresh topology snapshot reflecting the selectors'
 * current pool state and the current replication factor.
 */
public TokenPoolTopology getTokenPoolTopology() {
    final TokenPoolTopology snapshot = new TokenPoolTopology(replicationFactor.get());
    updateTokenPoolTopology(snapshot);
    return snapshot;
}
/**
 * Walk the strategy's ordered pools and record, under the given rack, every
 * pool whose host has a known token. Null pools and hosts without a tracked
 * token are silently skipped.
 */
private void addTokens(TokenPoolTopology topology, String rack, HostSelectionStrategy<CL> selectionStrategy) {
    for (HostConnectionPool<CL> hostPool : selectionStrategy.getOrderedHostPools()) {
        if (hostPool == null) {
            continue;
        }
        HostToken token = hostTokens.get(hostPool.getHost());
        if (token != null) {
            topology.addToken(rack, token.getToken(), hostPool);
        }
    }
}
/**
 * Hash the given key to its owning token using the local selector.
 * NOTE(review): delegates to the local selection strategy; a round-robin
 * strategy's getTokenForKey throws UnsupportedOperationException — presumably
 * this is only called in token-aware mode; confirm with callers.
 *
 * @param key string key to hash
 * @return token value owning the key
 */
public Long getTokenForKey(String key) {
return localSelector.getTokenForKey(key).getToken();
}
@Override
public String toString() {
    // Same rendering as the concatenation form, built incrementally.
    final StringBuilder sb = new StringBuilder("HostSelectionWithFallback{");
    sb.append("localDataCenter='").append(localDataCenter).append('\'');
    sb.append(", localRack='").append(localRack).append('\'');
    sb.append(", localSelector=").append(localSelector);
    sb.append(", remoteRackSelectors=").append(remoteRackSelectors);
    sb.append(", hostTokens=").append(hostTokens);
    sb.append(", tokenSupplier=").append(tokenSupplier);
    sb.append(", cpConfig=").append(cpConfig);
    sb.append(", cpMonitor=").append(cpMonitor);
    sb.append(", replicationFactor=").append(replicationFactor);
    sb.append(", topology=").append(topology);
    sb.append(", remoteRackNames=").append(remoteRackNames);
    sb.append(", selectorFactory=").append(selectorFactory);
    sb.append('}');
    return sb.toString();
}
}
| 6,062 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/CircularList.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.lb;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Utility class that encapsulates a thread safe circular list. It also provides functionality to be able to dynamically add and remove
 * elements from the list in a thread safe manner while callers to the class are still using the list.
 * <p>
 * This utility is mainly useful for ROUND ROBIN style load balancers. It is also useful for Connection pool monitors that need to track
 * state of operations against a connection pool in a bounded circular buffer.
 *
 * @param <T>
 * @author poberai
 */
public class CircularList<T> {

    // The thread safe reference to the inner list. Maintaining an atomic ref at this level helps enabling swapping out of the entire list
    // underneath when there is a change to the list such as element addition or removal.
    private final AtomicReference<InnerList> ref = new AtomicReference<InnerList>(null);

    /**
     * Constructor.
     *
     * @param origList initial contents; may be null or empty
     */
    public CircularList(Collection<T> origList) {
        ref.set(new InnerList(origList));
    }

    /**
     * Get the next element in the list, advancing the rotating index.
     *
     * @return next element, or null when the list is null or empty
     */
    public T getNextElement() {
        return ref.get().getNextElement();
    }

    /**
     * Swap the entire inner list with a new list.
     *
     * @param newList replacement contents
     */
    public void swapWithList(Collection<T> newList) {
        InnerList newInnerList = new InnerList(newList);
        ref.set(newInnerList);
    }

    /**
     * Add an element to the list (no-op when already present). This causes the
     * inner list to be swapped out. Synchronized so concurrent add/remove calls
     * cannot lose updates between the read and the swap.
     *
     * @param element
     */
    public synchronized void addElement(T element) {
        List<T> origList = ref.get().list;
        // FIX: the list may have been constructed with null contents (see
        // InnerList); previously this dereferenced the null list and threw NPE.
        if (origList == null) {
            List<T> single = new ArrayList<T>();
            single.add(element);
            swapWithList(single);
            return;
        }
        if (origList.contains(element)) {
            return;
        }
        List<T> newList = new ArrayList<T>(origList);
        newList.add(element);
        swapWithList(newList);
    }

    /**
     * Remove an element from this list (no-op when absent). This causes the
     * inner list to be swapped out.
     *
     * @param element
     */
    public synchronized void removeElement(T element) {
        List<T> origList = ref.get().list;
        // FIX: guard against a null inner list (same failure mode as addElement).
        if (origList == null || !origList.contains(element)) {
            return;
        }
        List<T> newList = new ArrayList<T>(origList);
        newList.remove(element);
        swapWithList(newList);
    }

    /**
     * Helpful utility to access the inner list. Must be used with care since the inner list can change.
     *
     * @return List<T> current contents, or null when constructed with null
     */
    public List<T> getEntireList() {
        InnerList iList = ref.get();
        return iList != null ? iList.getList() : null;
    }

    /**
     * Gets the size of the bounded list underneath. Note that this num can change if the inner list is swapped out.
     *
     * @return current size; 0 when the inner list is missing or null
     */
    public int getSize() {
        InnerList iList = ref.get();
        // FIX: getList() can legitimately return null (null-constructed list);
        // previously this threw NPE instead of reporting size 0.
        return (iList != null && iList.getList() != null) ? iList.getList().size() : 0;
    }

    /**
     * The inner list which manages the circular access to the actual list.
     *
     * @author poberai
     */
    private class InnerList {

        private final List<T> list;
        private final Integer size;

        // The rotating index over the list. currentIndex always indicates the index of the element that was last accessed.
        // Using AtomicLong instead of AtomicInteger to avoid resetting value on overflow. Range of long is good enough
        // to not wrap currentIndex.
        private final AtomicLong currentIndex = new AtomicLong(0L);

        private InnerList(Collection<T> newList) {
            if (newList != null) {
                list = new ArrayList<>(newList);
                size = list.size();
            } else {
                list = null;
                size = 0;
            }
        }

        private int getNextIndex() {
            // Callers guarantee size > 0 (see getNextElement), so the modulo is safe.
            return (int) (currentIndex.incrementAndGet() % size);
        }

        private T getNextElement() {
            return (list == null || list.size() == 0) ? null : list.get(getNextIndex());
        }

        private List<T> getList() {
            return list;
        }
    }
}
| 6,063 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/HttpEndpointBasedTokenMapSupplier.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.lb;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.TimeoutException;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils.Predicate;
import com.netflix.dyno.connectionpool.impl.utils.IOUtilities;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.params.HttpConnectionParams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.Set;
/**
 * Token map supplier that fetches ring topology over HTTP from a dynomite
 * node's cluster_describe admin endpoint, with bounded retries per node and
 * across randomly chosen nodes.
 */
public class HttpEndpointBasedTokenMapSupplier extends AbstractTokenMapSupplier {
// NOTE(review): field deliberately(?) shadows the org.slf4j.Logger type name.
private static final Logger Logger = LoggerFactory.getLogger(HttpEndpointBasedTokenMapSupplier.class);
// URL template; {hostname} and {port} are substituted before use.
private static final String DefaultServerUrl = "http://{hostname}:{port}/REST/v1/admin/cluster_describe";
// Attempts against one chosen node before giving up on it.
private static final Integer NUM_RETRIES_PER_NODE = 2;
// Attempts across different randomly chosen nodes.
private static final Integer NUM_RETRIER_ACROSS_NODES = 2;
private static final Integer defaultPort = 8080;
// URL with the port already substituted; {hostname} is filled per request.
private final String serverUrl;
public HttpEndpointBasedTokenMapSupplier() {
this(DefaultServerUrl, defaultPort);
}
public HttpEndpointBasedTokenMapSupplier(int port) {
this(DefaultServerUrl, port);
}
public HttpEndpointBasedTokenMapSupplier(String url, int port) {
super(port);
/**
 * If no port is passed means -1 then we will substitute to defaultPort
 * else the passed one.
 */
url = url.replace("{port}", (port > -1) ? Integer.toString(port) : Integer.toString(defaultPort));
serverUrl = url;
}
/**
 * Fetch the topology JSON from a single, explicitly named host.
 *
 * @param hostname host to query
 * @return raw JSON payload (may be null on a non-200 response)
 * @throws RuntimeException wrapping any I/O failure
 */
@Override
public String getTopologyJsonPayload(String hostname) {
try {
return getResponseViaHttp(hostname);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
 * Tries to get topology information by randomly trying across nodes.
 *
 * @param activeHosts candidate hosts to query
 * @return raw JSON payload from cluster_describe
 * @throws TimeoutException when the last failure was a connect timeout
 * @throws DynoException when all attempts fail
 */
@Override
public String getTopologyJsonPayload(Set<Host> activeHosts) {
int count = NUM_RETRIER_ACROSS_NODES;
String response;
Exception lastEx = null;
do {
try {
response = getTopologyFromRandomNodeWithRetry(activeHosts);
if (response != null) {
return response;
}
} catch (Exception e) {
// Keep only the most recent failure; earlier ones are superseded.
lastEx = e;
} finally {
count--;
}
} while ((count > 0));
if (lastEx != null) {
if (lastEx instanceof ConnectTimeoutException) {
throw new TimeoutException("Unable to obtain topology", lastEx);
}
throw new DynoException(lastEx);
} else {
// All attempts returned null responses without throwing.
throw new DynoException("Could not contact dynomite for token map");
}
}
/**
 * Perform one HTTP GET of the cluster_describe endpoint on the given host.
 * NOTE(review): the DefaultHttpClient's connection manager is never shut
 * down here — looks like a per-call resource leak; confirm whether callers
 * rely on client-level keep-alive.
 *
 * @param hostname host to query
 * @return response body, or null on a non-200 status
 */
private String getResponseViaHttp(String hostname) throws Exception {
String url = serverUrl;
url = url.replace("{hostname}", hostname);
if (Logger.isDebugEnabled()) {
Logger.debug("Making http call to url: " + url);
}
DefaultHttpClient client = new DefaultHttpClient();
// 2s to establish the connection, 5s socket read timeout.
client.getParams().setParameter(HttpConnectionParams.CONNECTION_TIMEOUT, 2000);
client.getParams().setParameter(HttpConnectionParams.SO_TIMEOUT, 5000);
DefaultHttpRequestRetryHandler retryhandler = new DefaultHttpRequestRetryHandler(NUM_RETRIER_ACROSS_NODES,
true);
client.setHttpRequestRetryHandler(retryhandler);
HttpGet get = new HttpGet(url);
HttpResponse response = client.execute(get);
int statusCode = response.getStatusLine().getStatusCode();
if (!(statusCode == 200)) {
Logger.error("Got non 200 status code from " + url);
return null;
}
InputStream in = null;
try {
in = response.getEntity().getContent();
return IOUtilities.toString(in);
} finally {
if (in != null) {
in.close();
}
}
}
/**
 * Finds a random host from the set of active hosts to perform
 * cluster_describe.
 * NOTE(review): when no host in the set is up, hostsUp is empty and
 * Random.nextInt(0) throws IllegalArgumentException — confirm callers
 * guarantee at least one up host.
 *
 * @param activeHosts
 * @return a random host
 */
public Host getRandomHost(Set<Host> activeHosts) {
Random random = new Random();
List<Host> hostsUp = new ArrayList<Host>(CollectionUtils.filter(activeHosts, new Predicate<Host>() {
@Override
public boolean apply(Host x) {
return x.isUp();
}
}));
return hostsUp.get(random.nextInt(hostsUp.size()));
}
/**
 * Tries multiple nodes, and it only bubbles up the last node's exception.
 * We want to bubble up the exception in order for the last node to be
 * removed from the connection pool.
 *
 * @param activeHosts
 * @return the topology from cluster_describe
 */
private String getTopologyFromRandomNodeWithRetry(Set<Host> activeHosts) {
int count = NUM_RETRIES_PER_NODE;
String nodeResponse;
Exception lastEx;
// One host is chosen and retried; a different host is picked only by the
// outer retry loop in getTopologyJsonPayload(Set).
final Host randomHost = getRandomHost(activeHosts);
do {
try {
lastEx = null;
nodeResponse = getResponseViaHttp(randomHost.getHostName());
if (nodeResponse != null) {
Logger.info("Received topology from " + randomHost);
return nodeResponse;
}
} catch (Exception e) {
Logger.info("cannot get topology from : " + randomHost);
lastEx = e;
} finally {
count--;
}
} while ((count > 0));
if (lastEx != null) {
// Tag the exception with the host so the pool can evict it.
if (lastEx instanceof ConnectTimeoutException) {
throw new TimeoutException("Unable to obtain topology", lastEx).setHost(randomHost);
}
throw new DynoException(String.format("Unable to obtain topology from %s", randomHost), lastEx);
} else {
throw new DynoException(String.format("Could not contact dynomite manager for token map on %s", randomHost));
}
}
}
| 6,064 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/HostUtils.java | package com.netflix.dyno.connectionpool.impl.lb;
import com.netflix.dyno.connectionpool.Host;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Static helpers for reasoning about hosts and their tokens, in particular
 * computing the replication factor of a datacenter from the token ring.
 */
public class HostUtils {

    // FIX: logger was registered under HostSelectionWithFallback.class, which
    // misattributed this utility's log lines; register under HostUtils.class.
    private static final Logger logger = LoggerFactory.getLogger(HostUtils.class);

    /**
     * Calculate replication factor from the given list of hosts.
     *
     * @param allHostTokens tokens for all known hosts
     * @param localRack rack the client runs in (may be null)
     * @return replicationFactor
     */
    public static int calculateReplicationFactor(List<HostToken> allHostTokens, String localRack) {
        return calculateReplicationFactorForDC(allHostTokens, null, localRack);
    }

    /**
     * Calculate replication factor for a datacenter.
     * If datacenter is null we derive it from localRack, or failing that from
     * the first host in the list. A rack name is assumed to be the datacenter
     * name plus a one-character zone suffix (e.g. "us-east-1" + "c").
     *
     * @param allHostTokens host tokens; must be non-empty
     * @param dataCenter datacenter to measure, or null to derive it
     * @param localRack local rack, or null
     * @return replicationFactor for the dataCenter
     * @throws RuntimeException when tokens in the DC have asymmetric replication
     */
    public static int calculateReplicationFactorForDC(List<HostToken> allHostTokens, String dataCenter, String localRack) {
        Map<Long, Integer> tokenCounts = new HashMap<>();
        // De-dup first: discovery can hand us the same host token more than once.
        Set<HostToken> uniqueHostTokens = new HashSet<>(allHostTokens);

        if (dataCenter == null) {
            if (localRack != null) {
                dataCenter = localRack.substring(0, localRack.length() - 1);
            } else {
                // No DC specified. Get the DC from the first host and use its replication factor.
                Host host = allHostTokens.get(0).getHost();
                String curRack = host.getRack();
                dataCenter = curRack.substring(0, curRack.length() - 1);
            }
        }

        // The number of hosts in this DC sharing a token value is the replication
        // factor. NOTE(review): contains() can cross-match DCs whose names are
        // substrings of one another — confirm naming guarantees.
        for (HostToken hostToken : uniqueHostTokens) {
            if (hostToken.getHost().getRack().contains(dataCenter)) {
                tokenCounts.merge(hostToken.getToken(), 1, Integer::sum);
            }
        }

        Set<Integer> uniqueCounts = new HashSet<>(tokenCounts.values());
        if (uniqueCounts.size() > 1) {
            throw new RuntimeException("Invalid configuration - replication factor cannot be asymmetric");
        }

        int rf = uniqueCounts.iterator().next();
        if (rf > 3) {
            logger.warn("Replication Factor is high: " + uniqueHostTokens);
        }
        return rf;
    }
}
| 6,065 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/TokenAwareSelection.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.lb;
import com.netflix.dyno.connectionpool.BaseOperation;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.Operation;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.HostSelectionStrategy;
import com.netflix.dyno.connectionpool.impl.hash.BinarySearchTokenMapper;
import com.netflix.dyno.connectionpool.impl.hash.Murmur1HashPartitioner;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils.Transform;
import org.apache.commons.lang3.StringUtils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Concrete implementation of the {@link HostSelectionStrategy} interface using
 * the TOKEN AWARE algorithm. Note that this component needs to be aware of the
 * dynomite ring topology to be able to successfully map to the correct token
 * owner for any key of an {@link Operation}
 *
 * @param <CL>
 * @author poberai
 * @author ipapapa
 */
public class TokenAwareSelection<CL> implements HostSelectionStrategy<CL> {

    // Maps a key hash to its owning token via binary search over the ring.
    private final BinarySearchTokenMapper tokenMapper;

    // token value -> connection pool for the host owning that token
    private final ConcurrentHashMap<Long, HostConnectionPool<CL>> tokenPools = new ConcurrentHashMap<Long, HostConnectionPool<CL>>();

    public TokenAwareSelection() {
        this(new Murmur1HashPartitioner());
    }

    public TokenAwareSelection(HashPartitioner hashPartitioner) {
        this.tokenMapper = new BinarySearchTokenMapper(hashPartitioner);
    }

    @Override
    public void initWithHosts(Map<HostToken, HostConnectionPool<CL>> hPools) {
        // Re-key the pool map by raw token value for O(1) lookup after hashing.
        tokenPools.putAll(CollectionUtils.transformMapKeys(hPools, new Transform<HostToken, Long>() {
            @Override
            public Long get(HostToken x) {
                return x.getToken();
            }
        }));
        this.tokenMapper.initSearchMechanism(hPools.keySet());
    }

    /**
     * Identifying the proper pool for the operation. A couple of things that may affect the decision
     * (a) hashtags: In this case we will construct the key by decomposing from the hashtag
     * (b) type of key: string keys vs binary keys.
     * In binary keys hashtags do not really matter.
     */
    @Override
    public HostConnectionPool<CL> getPoolForOperation(BaseOperation<CL, ?> op, String hashtag) throws NoAvailableHostsException {
        String key = op.getStringKey();
        HostConnectionPool<CL> hostPool;
        HostToken hToken;
        if (key != null) {
            // If a hashtag is provided by Dynomite then only the tagged portion
            // of the key contributes to the hash.
            if (hashtag == null || hashtag.isEmpty()) {
                hToken = this.getTokenForKey(key);
            } else {
                // NOTE(review): substringBetween returns null when the tag
                // delimiters are absent from the key; confirm upstream keys
                // always carry the tag before relying on this path.
                String hashValue = StringUtils.substringBetween(key, Character.toString(hashtag.charAt(0)), Character.toString(hashtag.charAt(1)));
                hToken = this.getTokenForKey(hashValue);
            }
            if (hToken == null) {
                throw new NoAvailableHostsException("Token not found for key " + key);
            }
            hostPool = tokenPools.get(hToken.getToken());
            if (hostPool == null) {
                throw new NoAvailableHostsException(
                        "Could not find host connection pool for key: " + key + ", hash: " + tokenMapper.hash(key) + " Token:" + hToken.getToken());
            }
        } else {
            // the key is binary
            byte[] binaryKey = op.getBinaryKey();
            hToken = this.getTokenForKey(binaryKey);
            if (hToken == null) {
                // FIX: byte[].toString() prints the array identity (e.g. "[B@1a2b3c"),
                // not its contents; render the bytes so the message is diagnosable.
                throw new NoAvailableHostsException("Token not found for key " + java.util.Arrays.toString(binaryKey));
            }
            hostPool = tokenPools.get(hToken.getToken());
            if (hostPool == null) {
                // FIX: same Arrays.toString rendering, and reuse the already
                // computed token instead of re-hashing the key (consistent with
                // the string-key branch above).
                throw new NoAvailableHostsException(
                        "Could not find host connection pool for key: " + java.util.Arrays.toString(binaryKey) + ", hash: " + tokenMapper.hash(binaryKey) + " Token:" + hToken.getToken());
            }
        }
        return hostPool;
    }

    @Override
    public Map<HostConnectionPool<CL>, BaseOperation<CL, ?>> getPoolsForOperationBatch(
            Collection<BaseOperation<CL, ?>> ops) throws NoAvailableHostsException {
        throw new RuntimeException("Not Implemented");
    }

    @Override
    public List<HostConnectionPool<CL>> getOrderedHostPools() {
        return new ArrayList<HostConnectionPool<CL>>(tokenPools.values());
    }

    @Override
    public HostConnectionPool<CL> getPoolForToken(Long token) {
        return tokenPools.get(token);
    }

    public List<HostConnectionPool<CL>> getPoolsForTokens(Long start, Long end) {
        throw new RuntimeException("Not Implemented");
    }

    @Override
    public HostToken getTokenForKey(String key) throws UnsupportedOperationException {
        Long keyHash = tokenMapper.hash(key);
        return tokenMapper.getToken(keyHash);
    }

    @Override
    public HostToken getTokenForKey(byte[] key) throws UnsupportedOperationException {
        Long keyHash = tokenMapper.hash(key);
        return tokenMapper.getToken(keyHash);
    }

    /**
     * Register a pool for the token; also teaches the token mapper about the
     * token when it was previously unknown.
     *
     * @return true when the token was newly added, false when it already existed
     */
    @Override
    public boolean addHostPool(HostToken hostToken, HostConnectionPool<CL> hostPool) {
        HostConnectionPool<CL> prevPool = tokenPools.put(hostToken.getToken(), hostPool);
        if (prevPool == null) {
            tokenMapper.addHostToken(hostToken);
            return true;
        } else {
            return false;
        }
    }

    /**
     * Drop the pool registered for the token.
     *
     * @return true when a pool was removed, false when the token was unknown
     */
    @Override
    public boolean removeHostPool(HostToken hostToken) {
        HostConnectionPool<CL> prev = tokenPools.get(hostToken.getToken());
        if (prev != null) {
            tokenPools.remove(hostToken.getToken());
            return true;
        } else {
            return false;
        }
    }

    @Override
    public boolean isTokenAware() {
        return true;
    }

    @Override
    public boolean isEmpty() {
        return tokenPools.isEmpty();
    }

    public Long getKeyHash(String key) {
        Long keyHash = tokenMapper.hash(key);
        return keyHash;
    }

    @Override
    public String toString() {
        return "TokenAwareSelection: " + tokenMapper.toString();
    }
}
| 6,066 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/lb/RoundRobinSelection.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.lb;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import com.netflix.dyno.connectionpool.BaseOperation;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.HostSelectionStrategy;
/**
 * Simple impl of {@link HostSelectionStrategy} that uses ROUND ROBIN. It employs the {@link CircularList} data structure
 * to provide RR balancing in a thread safe manner.
 * Note that the class can also support dynamically adding and removing {@link Host}
 *
 * @param <CL>
 * @author poberai
 */
public class RoundRobinSelection<CL> implements HostSelectionStrategy<CL> {
// The total set of host pools. Once the host is selected, we ask it's corresponding pool to vend a connection
private final ConcurrentHashMap<Long, HostConnectionPool<CL>> tokenPools = new ConcurrentHashMap<Long, HostConnectionPool<CL>>();
// the circular list of Host over which we load balance in a round robin fashion
private final CircularList<HostToken> circularList = new CircularList<HostToken>(null);
public RoundRobinSelection() {
}
/**
 * Advance the circular list up to one full revolution, returning the first
 * pool whose host is up and active. When none is found, returns the last
 * inactive pool anyway so {@code HostSelectionWithFallback} can pick a
 * fallback pool from another DC.
 */
@Override
public HostConnectionPool<CL> getPoolForOperation(BaseOperation<CL, ?> op, String hashtag) throws NoAvailableHostsException {
int numTries = circularList.getSize();
HostConnectionPool<CL> lastPool = null;
while (numTries > 0) {
lastPool = getNextConnectionPool();
numTries--;
if (lastPool.isActive() && lastPool.getHost().isUp()) {
return lastPool;
}
}
// If we reach here then we haven't found an active pool. Return the last inactive pool anyways,
// and HostSelectionWithFallback can choose a fallback pool from another dc
return lastPool;
}
/**
 * Assign each operation in the batch to the next pool in round-robin order.
 * Note: active/up state is not checked here, unlike the single-op path.
 */
@Override
public Map<HostConnectionPool<CL>, BaseOperation<CL, ?>> getPoolsForOperationBatch(Collection<BaseOperation<CL, ?>> ops) throws NoAvailableHostsException {
Map<HostConnectionPool<CL>, BaseOperation<CL, ?>> map = new HashMap<HostConnectionPool<CL>, BaseOperation<CL, ?>>();
for (BaseOperation<CL, ?> op : ops) {
map.put(getNextConnectionPool(), op);
}
return map;
}
@Override
public List<HostConnectionPool<CL>> getOrderedHostPools() {
return new ArrayList<HostConnectionPool<CL>>(tokenPools.values());
}
@Override
public HostConnectionPool<CL> getPoolForToken(Long token) {
return tokenPools.get(token);
}
@Override
public List<HostConnectionPool<CL>> getPoolsForTokens(Long start, Long end) {
throw new UnsupportedOperationException();
}
@Override
public HostToken getTokenForKey(String key) throws UnsupportedOperationException {
throw new UnsupportedOperationException("Not implemented for Round Robin load balancing strategy");
}
@Override
public HostToken getTokenForKey(byte[] key) throws UnsupportedOperationException {
throw new UnsupportedOperationException("Not implemented for Round Robin load balancing strategy");
}
/**
 * Rotate to the next host token and look up its pool.
 *
 * @throws NoAvailableHostsException when the token has no registered pool
 */
private HostConnectionPool<CL> getNextConnectionPool() throws NoAvailableHostsException {
HostToken hostToken = circularList.getNextElement();
HostConnectionPool<CL> hostPool = tokenPools.get(hostToken.getToken());
if (hostPool == null) {
throw new NoAvailableHostsException("Could not find host connection pool for host token: " + hostToken);
}
return hostPool;
}
@Override
public void initWithHosts(Map<HostToken, HostConnectionPool<CL>> hPools) {
for (HostToken token : hPools.keySet()) {
tokenPools.put(token.getToken(), hPools.get(token));
}
// Publish the rotation order after the pools are registered.
circularList.swapWithList(hPools.keySet());
}
/**
 * Register a pool for the token and add the token to the rotation (no-op when
 * the token already had a pool).
 *
 * @return true when the token was newly added
 */
@Override
public boolean addHostPool(HostToken host, HostConnectionPool<CL> hostPool) {
HostConnectionPool<CL> prevPool = tokenPools.put(host.getToken(), hostPool);
if (prevPool == null) {
List<HostToken> newHostList = new ArrayList<HostToken>(circularList.getEntireList());
newHostList.add(host);
circularList.swapWithList(newHostList);
}
return prevPool == null;
}
/**
 * Remove the token from the rotation and drop its pool (no-op when unknown).
 *
 * @return true when a pool was removed
 */
@Override
public boolean removeHostPool(HostToken host) {
HostConnectionPool<CL> prevPool = tokenPools.get(host.getToken());
if (prevPool != null) {
List<HostToken> newHostList = new ArrayList<HostToken>(circularList.getEntireList());
newHostList.remove(host);
circularList.swapWithList(newHostList);
tokenPools.remove(host.getToken());
}
return prevPool != null;
}
@Override
public boolean isTokenAware() {
return false;
}
@Override
public boolean isEmpty() {
return tokenPools.isEmpty();
}
public String toString() {
return "RoundRobinSelector: list: " + circularList.toString();
}
}
| 6,067 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/utils/ConfigUtils.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.utils;
/**
* Simple helper class that provides convenience methods for configuration
* related tasks.
*
* @author jcacciatore
* @author ipapapanagiotou
*/
/**
 * Simple helper class that provides convenience methods for configuration
 * related tasks. Values are read from environment variables first, then JVM
 * system properties of the same name.
 *
 * @author jcacciatore
 * @author ipapapanagiotou
 */
public class ConfigUtils {

    /**
     * Read a configuration value, preferring the environment variable over the
     * JVM system property of the same name. (Replaces the repeated
     * getenv-twice ternary pattern; identical semantics.)
     */
    private static String getEnvOrProperty(String name) {
        String fromEnv = System.getenv(name);
        return fromEnv != null ? fromEnv : System.getProperty(name);
    }

    /**
     * @return the local rack/availability zone from LOCAL_RACK, falling back
     *         to the legacy EC2_AVAILABILITY_ZONE setting; null when unset
     */
    public static String getLocalZone() {
        String localRack = getEnvOrProperty("LOCAL_RACK");
        // backward compatible
        if (localRack == null) {
            localRack = getEnvOrProperty("EC2_AVAILABILITY_ZONE");
        }
        return localRack;
    }

    /**
     * @return the datacenter that the client is in, from LOCAL_DATACENTER or
     *         the legacy EC2_REGION setting, or derived from the local zone
     */
    public static String getDataCenter() {
        String localDatacenter = getEnvOrProperty("LOCAL_DATACENTER");
        // backward compatible
        if (localDatacenter == null) {
            localDatacenter = getEnvOrProperty("EC2_REGION");
        }
        if (localDatacenter == null) {
            return getDataCenterFromRack(getLocalZone());
        }
        return localDatacenter;
    }

    /**
     * Datacenter format us-east-x, us-west-x etc.: a rack name is the
     * datacenter plus a one-character zone suffix.
     *
     * @param rack rack name, or null
     * @return the datacenter based on the provided rack, or null for a null rack
     */
    public static String getDataCenterFromRack(String rack) {
        if (rack != null) {
            return rack.substring(0, rack.length() - 1);
        }
        return null;
    }
}
| 6,068 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/utils/IOUtilities.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.utils;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
public class IOUtilities {

    /** Non-instantiable utility class (consistent with ZipUtils). */
    private IOUtilities() {
    }

    /**
     * Reads all lines from the given file using the platform default charset.
     *
     * @param file file to read
     * @return the file's lines, without line terminators
     * @throws RuntimeException wrapping any underlying failure
     */
    public static List<String> readLines(File file) {
        List<String> lines = new ArrayList<String>();
        // try-with-resources guarantees the reader is closed on all paths
        try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
            String line;
            while ((line = reader.readLine()) != null) {
                lines.add(line);
            }
            return lines;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Drains the given stream and decodes all bytes as a single String using
     * the platform default charset.
     *
     * Bytes are accumulated first and decoded once at the end; decoding each
     * 1024-byte chunk separately (as the previous implementation did) corrupts
     * multi-byte characters that straddle a chunk boundary.
     *
     * @param in stream to drain; not closed by this method
     * @return decoded stream contents
     * @throws RuntimeException wrapping any underlying IOException
     */
    public static String toString(InputStream in) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int numRead;
        try {
            while ((numRead = in.read(buffer)) != -1) {
                out.write(buffer, 0, numRead);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        // single decode, platform default charset (same charset the original used)
        return out.toString();
    }
}
| 6,069 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/utils/EstimatedHistogram.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.utils;
import java.util.Arrays;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLongArray;
import org.slf4j.Logger;
public class EstimatedHistogram {

    /**
     * The series of values to which the counts in `buckets` correspond:
     * 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc.
     * Thus, a `buckets` of [0, 0, 1, 10] would mean we had seen one value of 3 and 10 values of 4.
     *
     * The series starts at 1 and grows by 1.2 each time (rounding and removing duplicates). It goes from 1
     * to around 36M by default (creating 90+1 buckets), which will give us timing resolution from microseconds to
     * 36 seconds, with less precision as the numbers get larger.
     *
     * Each bucket represents values from (previous bucket offset, current offset].
     */
    private final long[] bucketOffsets;

    // buckets is one element longer than bucketOffsets -- the last element is values greater than the last offset
    final AtomicLongArray buckets;

    public EstimatedHistogram() {
        this(90);
    }

    public EstimatedHistogram(int bucketCount) {
        bucketOffsets = newOffsets(bucketCount);
        buckets = new AtomicLongArray(bucketOffsets.length + 1);
    }

    public EstimatedHistogram(long[] offsets, long[] bucketData) {
        assert bucketData.length == offsets.length + 1;
        bucketOffsets = offsets;
        buckets = new AtomicLongArray(bucketData);
    }

    // Builds the offset series: starts at 1, multiplies by 1.2, bumping by one
    // whenever rounding would produce a duplicate offset.
    private static long[] newOffsets(int size) {
        long[] result = new long[size];
        long last = 1;
        result[0] = last;
        for (int i = 1; i < size; i++) {
            long next = Math.round(last * 1.2);
            if (next == last)
                next++;
            result[i] = next;
            last = next;
        }
        return result;
    }

    /**
     * @return the histogram values corresponding to each bucket index
     */
    public long[] getBucketOffsets() {
        return bucketOffsets;
    }

    /**
     * Increments the count of the bucket closest to n, rounding UP.
     * @param n value to record
     */
    public void add(long n) {
        int index = Arrays.binarySearch(bucketOffsets, n);
        if (index < 0) {
            // inexact match, take the first bucket higher than n
            index = -index - 1;
        }
        // else exact match; we're good
        buckets.incrementAndGet(index);
    }

    /**
     * @return the count in the given bucket
     */
    long get(int bucket) {
        return buckets.get(bucket);
    }

    /**
     * @param reset zero out buckets afterwards if true
     * @return a long[] containing the current histogram buckets
     */
    public long[] getBuckets(boolean reset) {
        final int len = buckets.length();
        long[] rv = new long[len];
        if (reset)
            for (int i = 0; i < len; i++)
                rv[i] = buckets.getAndSet(i, 0L);
        else
            for (int i = 0; i < len; i++)
                rv[i] = buckets.get(i);
        return rv;
    }

    /**
     * @return the smallest value that could have been added to this histogram
     */
    public long min() {
        for (int i = 0; i < buckets.length(); i++) {
            if (buckets.get(i) > 0)
                return i == 0 ? 0 : 1 + bucketOffsets[i - 1];
        }
        return 0;
    }

    /**
     * @return the largest value that could have been added to this histogram. If the histogram
     * overflowed, returns Long.MAX_VALUE.
     */
    public long max() {
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            return Long.MAX_VALUE;
        for (int i = lastBucket - 1; i >= 0; i--) {
            if (buckets.get(i) > 0)
                return bucketOffsets[i];
        }
        return 0;
    }

    /**
     * @param percentile desired percentile in [0, 1]
     * @return estimated value at given percentile
     * @throws IllegalStateException if the histogram overflowed
     */
    public long percentile(double percentile) {
        assert percentile >= 0 && percentile <= 1.0;
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            throw new IllegalStateException("Unable to compute when histogram overflowed");
        long pcount = (long) Math.floor(count() * percentile);
        if (pcount == 0)
            return 0;
        long elements = 0;
        for (int i = 0; i < lastBucket; i++) {
            elements += buckets.get(i);
            if (elements >= pcount)
                return bucketOffsets[i];
        }
        return 0;
    }

    /**
     * @return the mean histogram value (average of bucket offsets, weighted by count)
     * @throws IllegalStateException if any values were greater than the largest bucket threshold
     */
    public long mean() {
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            throw new IllegalStateException("Unable to compute ceiling for max when histogram overflowed");
        long elements = 0;
        long sum = 0;
        for (int i = 0; i < lastBucket; i++) {
            long bCount = buckets.get(i);
            elements += bCount;
            sum += bCount * bucketOffsets[i];
        }
        return (long) Math.ceil((double) sum / elements);
    }

    /**
     * @return the total number of non-zero values
     */
    public long count() {
        long sum = 0L;
        for (int i = 0; i < buckets.length(); i++)
            sum += buckets.get(i);
        return sum;
    }

    /**
     * @return true if this histogram has overflowed -- that is, a value larger than our largest bucket could bound was added
     */
    public boolean isOverflowed() {
        return buckets.get(buckets.length() - 1) > 0;
    }

    /**
     * log.debug() every record in the histogram
     *
     * @param log logger to emit one line per populated bucket to
     */
    public void log(Logger log) {
        // only print overflow if there is any
        int nameCount;
        if (buckets.get(buckets.length() - 1) == 0)
            nameCount = buckets.length() - 1;
        else
            nameCount = buckets.length();
        String[] names = new String[nameCount];
        int maxNameLength = 0;
        for (int i = 0; i < nameCount; i++) {
            names[i] = nameOfRange(bucketOffsets, i);
            maxNameLength = Math.max(maxNameLength, names[i].length());
        }
        // emit log records
        String formatstr = "%" + maxNameLength + "s: %d";
        for (int i = 0; i < nameCount; i++) {
            long count = buckets.get(i);
            // sort-of-hack to not print empty ranges at the start that are only used to demarcate the
            // first populated range. for code clarity we don't omit this record from the maxNameLength
            // calculation, and accept the unnecessary whitespace prefixes that will occasionally occur
            if (i == 0 && count == 0)
                continue;
            log.debug(String.format(formatstr, names[i], count));
        }
    }

    private static String nameOfRange(long[] bucketOffsets, int index) {
        StringBuilder sb = new StringBuilder();
        appendRange(sb, bucketOffsets, index);
        return sb.toString();
    }

    private static void appendRange(StringBuilder sb, long[] bucketOffsets, int index) {
        sb.append("[");
        if (index == 0)
            if (bucketOffsets[0] > 0)
                // by original definition, this histogram is for values greater than zero only;
                // if values of 0 or less are required, an entry of lb-1 must be inserted at the start
                sb.append("1");
            else
                sb.append("-Inf");
        else
            sb.append(bucketOffsets[index - 1] + 1);
        sb.append("..");
        if (index == bucketOffsets.length)
            sb.append("Inf");
        else
            sb.append(bucketOffsets[index]);
        sb.append("]");
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (!(o instanceof EstimatedHistogram))
            return false;
        EstimatedHistogram that = (EstimatedHistogram) o;
        return Arrays.equals(getBucketOffsets(), that.getBucketOffsets()) &&
                Arrays.equals(getBuckets(false), that.getBuckets(false));
    }

    @Override
    public int hashCode() {
        // Hash the arrays by CONTENT. The previous Objects.hash(array, array)
        // used the arrays' identity hash codes, so two histograms that compared
        // equal (equals() uses Arrays.equals) almost never hashed alike,
        // violating the equals/hashCode contract.
        return 31 * Arrays.hashCode(getBucketOffsets()) + Arrays.hashCode(getBuckets(false));
    }
}
| 6,070 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/utils/CollectionUtils.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.utils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Simple utils that operation on collections. Use of this class helps avoid many uses of collection utilities in guava
* and we want to avoid using guava as much as possible to minimize dep jar version conflicts.
*
* @author poberai
*/
public class CollectionUtils {

    /** Non-instantiable utility class. */
    private CollectionUtils() {
    }

    /** Maps a value of type X to a value of type Y. */
    public interface Transform<X, Y> {
        public Y get(X x);
    }

    /** Maps a map entry (key of type X, value of type Y) to a value of type Z. */
    public interface MapEntryTransform<X, Y, Z> {
        public Z get(X x, Y y);
    }

    /** Boolean test applied to a single value. */
    public static interface Predicate<X> {
        public boolean apply(X x);
    }

    /**
     * Applies the transform to every element of the collection.
     *
     * @return a new list of the transformed values, in iteration order
     */
    public static <X, Y> Collection<Y> transform(Collection<X> from, Transform<X, Y> transform) {
        List<Y> list = new ArrayList<Y>(from.size());
        for (X x : from) {
            list.add(transform.get(x));
        }
        return list;
    }

    /**
     * @return a new list containing only the elements matching the predicate
     */
    public static <X> Collection<X> filter(Collection<X> from, Predicate<X> predicate) {
        List<X> list = new ArrayList<X>();
        for (X x : from) {
            if (predicate.apply(x)) {
                list.add(x);
            }
        }
        return list;
    }

    /**
     * @return the first element matching the predicate, or null if none match
     */
    public static <X> X find(Collection<X> from, Predicate<X> predicate) {
        for (X x : from) {
            if (predicate.apply(x)) {
                return x;
            }
        }
        return null;
    }

    /**
     * @return a new map containing only the entries whose keys match the predicate
     */
    public static <X, Y> Map<X, Y> filterKeys(Map<X, Y> from, Predicate<X> predicate) {
        Map<X, Y> toMap = new HashMap<X, Y>();
        // iterate entries (not keySet + get) to avoid a second lookup per key
        for (Map.Entry<X, Y> entry : from.entrySet()) {
            if (predicate.apply(entry.getKey())) {
                toMap.put(entry.getKey(), entry.getValue());
            }
        }
        return toMap;
    }

    /**
     * Transforms each (key, value) entry of {@code from} and stores the result
     * under the same key in {@code to}.
     */
    public static <X, Y, Z> void transform(Map<X, Y> from, Map<X, Z> to, MapEntryTransform<X, Y, Z> transform) {
        for (Map.Entry<X, Y> entry : from.entrySet()) {
            to.put(entry.getKey(), transform.get(entry.getKey(), entry.getValue()));
        }
    }

    /**
     * @return a new map with the same keys as {@code from} and transformed values
     */
    public static <X, Y, Z> Map<X, Z> transform(Map<X, Y> from, MapEntryTransform<X, Y, Z> transform) {
        Map<X, Z> toMap = new HashMap<X, Z>();
        transform(from, toMap, transform);
        return toMap;
    }

    /**
     * Re-keys a map using the given key transform. Entries whose value is null
     * are skipped, preserving the original behavior.
     */
    public static <X, Y, Z> Map<Y, Z> transformMapKeys(Map<X, Z> from, Transform<X, Y> transform) {
        Map<Y, Z> toMap = new HashMap<Y, Z>();
        for (Map.Entry<X, Z> entry : from.entrySet()) {
            if (entry.getValue() != null) {
                toMap.put(transform.get(entry.getKey()), entry.getValue());
            }
        }
        return toMap;
    }

    /**
     * Computes the one-sided differences between two maps. Only key presence is
     * compared; values are carried along but not compared.
     */
    public static <X, Y> MapDifference<X, Y> difference(Map<X, Y> left, Map<X, Y> right) {
        MapDifference<X, Y> diff = new MapDifference<X, Y>();
        for (Map.Entry<X, Y> entry : left.entrySet()) {
            if (!right.containsKey(entry.getKey())) {
                diff.leftOnly.put(entry.getKey(), entry.getValue());
            }
        }
        for (Map.Entry<X, Y> entry : right.entrySet()) {
            if (!left.containsKey(entry.getKey())) {
                diff.rightOnly.put(entry.getKey(), entry.getValue());
            }
        }
        return diff;
    }

    /** Holds the entries present in only one of two compared maps. */
    public static class MapDifference<X, Y> {

        private Map<X, Y> leftOnly = new HashMap<X, Y>();
        private Map<X, Y> rightOnly = new HashMap<X, Y>();

        public Map<X, Y> entriesOnlyOnLeft() {
            return leftOnly;
        }

        public Map<X, Y> entriesOnlyOnRight() {
            return rightOnly;
        }
    }

    /**
     * Returns a new, genuinely mutable ArrayList of the given elements.
     * (The previous implementation returned the fixed-size {@code Arrays.asList}
     * view, which threw UnsupportedOperationException on add/remove despite
     * this method's name.)
     */
    @SafeVarargs
    public static <X> List<X> newArrayList(X... args) {
        return new ArrayList<X>(Arrays.asList(args));
    }
}
| 6,071 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/utils/RateLimitUtil.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.impl.utils;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Simple fixed-window rate limiter allowing up to {@code rps} acquisitions per
 * wall-clock second. Per-second state lives in an {@link InnerState} instance
 * that is swapped atomically when a new second begins. Callers are expected to
 * loop on {@link #acquire()} until it returns true.
 */
public class RateLimitUtil {

    // Current window state; replaced wholesale (CAS) when the second rolls over.
    private final AtomicReference<InnerState> ref = new AtomicReference<InnerState>(null);

    private RateLimitUtil(int rps) {
        this.ref.set(new InnerState(rps));
    }

    /** Factory: creates a limiter allowing {@code n} permits per second. */
    public static RateLimitUtil create(int n) {
        return new RateLimitUtil(n);
    }

    /** @return the configured permits-per-second limit. */
    public int getRps() {
        return ref.get().getRps();
    }

    /**
     * Attempts to take one permit from the current one-second window.
     *
     * @return true if a permit was acquired; false if the caller was throttled
     *         (after sleeping), interrupted while sleeping, or the window just
     *         rolled over -- in all false cases the caller should retry
     */
    public boolean acquire() {
        if (ref.get().checkSameSecond()) {
            long timeToSleepMs = ref.get().increment();
            if (timeToSleepMs != -1) {
                try {
                    Thread.sleep(timeToSleepMs);
                    return false;
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so callers can observe the
                    // interruption (it was previously swallowed silently).
                    Thread.currentThread().interrupt();
                    return false;
                }
            } else {
                return true;
            }
        } else {
            // Second rolled over: install a fresh window. The CAS may lose to a
            // concurrent caller; either way this caller retries via false.
            InnerState oldState = ref.get();
            InnerState newState = new InnerState(oldState.limit);
            ref.compareAndSet(oldState, newState);
            return false;
        }
    }

    /** One second's worth of limiter state. */
    private class InnerState {

        private final AtomicInteger counter = new AtomicInteger();
        private final AtomicLong second = new AtomicLong(0L);
        private final AtomicLong origTime = new AtomicLong(0L);
        private final int limit;

        private InnerState(int limit) {
            this.limit = limit;
            counter.set(0);
            origTime.set(System.currentTimeMillis());
            second.set(origTime.get() / 1000);
        }

        // True while the wall clock is still within this state's second.
        private boolean checkSameSecond() {
            long time = System.currentTimeMillis();
            return second.get() == time / 1000;
        }

        // Returns -1 when a permit was taken; otherwise the suggested sleep in
        // milliseconds (time elapsed since this window started).
        // NOTE(review): get()-then-incrementAndGet() is a check-then-act race,
        // so concurrent callers can slightly exceed `limit`; presumably an
        // accepted approximation -- confirm before tightening.
        private long increment() {
            if (counter.get() < limit) {
                counter.incrementAndGet();
                return -1;
            } else {
                return System.currentTimeMillis() - origTime.get();
            }
        }

        private int getRps() {
            return limit;
        }
    }
}
| 6,072 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/impl/utils/ZipUtils.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.impl.utils;
import org.apache.commons.io.IOUtils;
import com.sun.jersey.core.util.Base64;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
public final class ZipUtils {
private ZipUtils() {
}
public static byte[] compressString(String value) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream(value.length());
try (GZIPOutputStream gos = new GZIPOutputStream(baos)) {
gos.write(Base64.encode(value.getBytes(StandardCharsets.UTF_8)));
}
byte[] compressed = baos.toByteArray();
baos.close();
return compressed;
}
/**
* Encodes the given string and then GZIP compresses it.
*
* @param value String input
* @return compressed byte array output
* @throws IOException
*/
public static byte[] compressStringNonBase64(String value) throws IOException {
return compressBytesNonBase64(value.getBytes(StandardCharsets.UTF_8));
}
/**
* Encodes the given byte array and then GZIP compresses it.
*
* @param value byte array input
* @return compressed byte array output
* @throws IOException
*/
public static byte[] compressBytesNonBase64(byte[] value) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream(value.length);
try (GZIPOutputStream gos = new GZIPOutputStream(baos)) {
gos.write(value);
}
byte[] compressed = baos.toByteArray();
baos.close();
return compressed;
}
/**
* Decompresses the given byte array without transforming it into a String
*
* @param compressed byte array input
* @return decompressed data in a byte array
* @throws IOException
*/
public static byte[] decompressBytesNonBase64(byte[] compressed) throws IOException {
ByteArrayInputStream is = new ByteArrayInputStream(compressed);
try (InputStream gis = new GZIPInputStream(is)) {
return IOUtils.toByteArray(gis);
}
}
/**
* Decompresses the given byte array
*
* @param compressed byte array input
* @return decompressed data in string format
* @throws IOException
*/
public static String decompressStringNonBase64(byte[] compressed) throws IOException {
ByteArrayInputStream is = new ByteArrayInputStream(compressed);
try (InputStream gis = new GZIPInputStream(is)) {
return new String(IOUtils.toByteArray(gis), StandardCharsets.UTF_8);
}
}
/**
* Encodes the given string with Base64 encoding and then GZIP compresses it. Returns
* result as a Base64 encoded string.
*
* @param value input String
* @return Base64 encoded compressed String
* @throws IOException
*/
public static String compressStringToBase64String(String value) throws IOException {
return new String(Base64.encode(compressString(value)), StandardCharsets.UTF_8);
}
/**
* Decompresses the given byte array and decodes with Base64 decoding
*
* @param compressed byte array input
* @return decompressed data in string format
* @throws IOException
*/
public static String decompressString(byte[] compressed) throws IOException {
ByteArrayInputStream is = new ByteArrayInputStream(compressed);
try (InputStream gis = new GZIPInputStream(is)) {
return new String(Base64.decode(IOUtils.toByteArray(gis)), StandardCharsets.UTF_8);
}
}
/**
* Given a Base64 encoded String, decompresses it.
*
* @param compressed Compressed String
* @return decompressed String
* @throws IOException
*/
public static String decompressFromBase64String(String compressed) throws IOException {
return decompressString(Base64.decode(compressed.getBytes(StandardCharsets.UTF_8)));
}
/**
* Determines if a byte array is compressed. The java.util.zip GZip
* implementation does not expose the GZip header so it is difficult to determine
* if a string is compressed.
*
* @param bytes an array of bytes
* @return true if the array is compressed or false otherwise
* @throws java.io.IOException if the byte array couldn't be read
*/
public static boolean isCompressed(byte[] bytes) throws IOException {
return bytes != null && bytes.length >= 2 &&
bytes[0] == (byte) (GZIPInputStream.GZIP_MAGIC) && bytes[1] == (byte) (GZIPInputStream.GZIP_MAGIC >> 8);
}
/**
* Determines if an InputStream is compressed. The java.util.zip GZip
* implementation does not expose the GZip header so it is difficult to determine
* if a string is compressed.
*
* @param inputStream an array of bytes
* @return true if the stream is compressed or false otherwise
* @throws java.io.IOException if the byte array couldn't be read
*/
public static boolean isCompressed(InputStream inputStream) throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
byte[] data = new byte[2];
int nRead = inputStream.read(data, 0, 2);
buffer.write(data, 0, nRead);
buffer.flush();
return isCompressed(buffer.toByteArray());
}
/**
* Determines if a String is compressed. The input String <b>must be Base64 encoded</b>.
* The java.util.zip GZip implementation does not expose the GZip header so it is difficult to determine
* if a string is compressed.
*
* @param input String
* @return true if the String is compressed or false otherwise
* @throws java.io.IOException if the byte array of String couldn't be read
*/
public static boolean isCompressed(String input) throws IOException {
return input != null && Base64.isBase64(input) &&
isCompressed(Base64.decode(input.getBytes(StandardCharsets.UTF_8)));
}
} | 6,073 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/ThrottledException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
/**
 * Connection exception raised when an operation is throttled. Implements the
 * {@link IsRetryableException} marker, so (per that marker's contract) the
 * failed operation may be re-attempted.
 */
public class ThrottledException extends DynoConnectException implements IsRetryableException {

    private static final long serialVersionUID = -9132799199614005261L;

    /** @param message description of the throttling condition */
    public ThrottledException(String message) {
        super(message);
    }

    /** @param t underlying cause */
    public ThrottledException(Throwable t) {
        super(t);
    }

    /**
     * @param message description of the throttling condition
     * @param cause underlying cause
     */
    public ThrottledException(String message, Throwable cause) {
        super(message, cause);
    }
}
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/DynoConnectException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
import java.util.concurrent.TimeUnit;
import com.netflix.dyno.connectionpool.Host;
/**
 * Connection-level exception enriched with routing/timing context: the target
 * host, operation latency (with and without pool overhead, in nanoseconds),
 * and the attempt count. The setters are fluent so callers can chain them.
 */
public class DynoConnectException extends DynoException {

    private static final long serialVersionUID = 5111292446354085002L;

    // Host the failed operation was routed to; defaults to the NO_HOST sentinel.
    private Host host = Host.NO_HOST;
    // Operation latency in nanoseconds, excluding pool overhead.
    private long latency = 0;
    // Latency including time spent in the connection pool.
    private long latencyWithPool = 0;
    // How many attempts had been made when this exception was raised.
    private int attemptCount = 0;

    public DynoConnectException(String message) {
        super(message);
    }

    public DynoConnectException(Throwable t) {
        super(t);
    }

    public DynoConnectException(String message, Throwable cause) {
        super(message, cause);
    }

    /** Fluent setter: records the host involved in the failure. */
    public DynoConnectException setHost(Host host) {
        this.host = host;
        return this;
    }

    public Host getHost() {
        return this.host;
    }

    /** Fluent setter: records the operation latency in nanoseconds. */
    public DynoConnectException setLatency(long latency) {
        this.latency = latency;
        return this;
    }

    public long getLatency() {
        return this.latency;
    }

    /** @return the recorded latency converted from nanoseconds to the given unit. */
    public long getLatency(TimeUnit units) {
        return units.convert(this.latency, TimeUnit.NANOSECONDS);
    }

    /** Fluent setter: records latency including pool overhead, in nanoseconds. */
    public DynoException setLatencyWithPool(long latency) {
        this.latencyWithPool = latency;
        return this;
    }

    public long getLatencyWithPool() {
        return this.latencyWithPool;
    }

    /**
     * Prefixes the underlying message with the exception class name and the
     * recorded host/latency/attempt context.
     */
    @Override
    public String getMessage() {
        String context = "host=" + host.toString()
                + ", latency=" + latency + "(" + latencyWithPool + ")"
                + ", attempts=" + attemptCount;
        return getClass().getSimpleName() + ": [" + context + "]" + super.getMessage();
    }

    /** @return the raw message without the contextual prefix. */
    public String getOriginalMessage() {
        return super.getMessage();
    }

    /** Fluent setter: records the number of attempts made. */
    public DynoConnectException setAttempt(int attemptCount) {
        this.attemptCount = attemptCount;
        return this;
    }
}
| 6,075 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/PoolExhaustedException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
import com.netflix.dyno.connectionpool.HostConnectionPool;
/**
* Indicates there are no connections left in the host connection pool.
*/
public class PoolExhaustedException extends DynoConnectException {

    // The exhausted pool, retained so callers can inspect it via
    // getHostConnectionPool(). Raw type kept as-is from the original code.
    private final HostConnectionPool hcp;

    private static final long serialVersionUID = 9081993527008721028L;

    /**
     * @param hostConnectionPool the pool that had no connections available
     * @param message detail message
     */
    public PoolExhaustedException(HostConnectionPool hostConnectionPool, String message) {
        super(message);
        this.hcp = hostConnectionPool;
    }

    /**
     * @param hostConnectionPool the pool that had no connections available
     * @param message detail message
     * @param cause underlying cause
     */
    public PoolExhaustedException(HostConnectionPool hostConnectionPool, String message, Throwable cause) {
        super(message, cause);
        this.hcp = hostConnectionPool;
    }

    /**
     * @param hostConnectionPool the pool that had no connections available
     * @param t underlying cause
     */
    public PoolExhaustedException(HostConnectionPool hostConnectionPool, Throwable t) {
        super(t);
        this.hcp = hostConnectionPool;
    }

    /** @return the pool whose connections were exhausted. */
    public HostConnectionPool getHostConnectionPool() {
        return this.hcp;
    }
}
| 6,076 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/NoAvailableHostsException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
/**
 * Thrown when no hosts are available to service a request.
 */
public class NoAvailableHostsException extends DynoConnectException {

    private static final long serialVersionUID = -6345231310492496030L;

    /** @param message detail describing why no hosts were available */
    public NoAvailableHostsException(String message) {
        super(message);
    }
}
| 6,077 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/PoolOfflineException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
import com.netflix.dyno.connectionpool.Host;
/**
* Indicates the pool is likely not active when accessed
*/
public class PoolOfflineException extends DynoConnectException {
private static final long serialVersionUID = -345340994112630363L;
public PoolOfflineException(Host host, String message) {
super(message);
}
} | 6,078 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/DynoException.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.connectionpool.exception;
/**
* Connection exception caused by an error in the connection pool or a transport
* error related to the connection itself. Application errors are derived from
* OperationException.
*
* @author poberai
*/
public class DynoException extends RuntimeException {

    private static final long serialVersionUID = 3757459937536486618L;

    /** Creates an exception with no detail message or cause. */
    public DynoException() {
        super();
    }

    /**
     * @param message detail message
     * @param cause underlying cause
     */
    public DynoException(String message, Throwable cause) {
        super(message, cause);
    }

    /** @param message detail message */
    public DynoException(String message) {
        super(message);
    }

    /** @param cause underlying cause */
    public DynoException(Throwable cause) {
        super(cause);
    }
}
| 6,079 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/IsRetryableException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
/**
 * Marker interface: exceptions implementing this indicate a transient failure,
 * so the failed operation may safely be retried by the caller.
 */
public interface IsRetryableException {
}
| 6,080 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/PoolTimeoutException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
/**
* Indicates that a thread waiting to obtain a connection from the pool has timed-out while waiting; it's likely
* that all connections are busy servicing requests.
*/
public class PoolTimeoutException extends DynoConnectException implements IsRetryableException {
private static final long serialVersionUID = -8579946319118318717L;
/**
 * @param message description of the pool-wait timeout
 */
public PoolTimeoutException(String message) {
super(message);
}
/**
 * @param t underlying cause
 */
public PoolTimeoutException(Throwable t) {
super(t);
}
/**
 * @param message description of the pool-wait timeout
 * @param cause underlying cause
 */
public PoolTimeoutException(String message, Throwable cause) {
super(message, cause);
}
}
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/BadRequestException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
/**
 * Thrown when a request is malformed or otherwise invalid before it reaches
 * the connection pool.
 *
 * Previously this class declared no constructors at all, so callers could not
 * attach a message or cause; the constructors below mirror those of the sibling
 * exception types (e.g. {@code DynoException}) and are backward-compatible
 * (the implicit no-arg constructor is preserved explicitly).
 */
public class BadRequestException extends DynoException {

    private static final long serialVersionUID = 6244389154130041929L;

    /** Creates an exception with no detail message or cause. */
    public BadRequestException() {
        super();
    }

    /**
     * @param message description of why the request was rejected
     */
    public BadRequestException(String message) {
        super(message);
    }

    /**
     * @param message description of why the request was rejected
     * @param cause underlying cause
     */
    public BadRequestException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * @param cause underlying cause
     */
    public BadRequestException(Throwable cause) {
        super(cause);
    }
}
| 6,082 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/FatalConnectionException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
public class FatalConnectionException extends DynoConnectException {
/**
 * Serialization version for this non-retryable connection failure.
 */
private static final long serialVersionUID = 953422826144906928L;
/**
 * @param message description of the fatal connection failure
 */
public FatalConnectionException(String message) {
super(message);
}
/**
 * @param t underlying cause
 */
public FatalConnectionException(Throwable t) {
super(t);
}
/**
 * @param message description of the fatal connection failure
 * @param cause underlying cause
 */
public FatalConnectionException(String message, Throwable cause) {
super(message, cause);
}
}
| 6,083 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/IsDeadConnectionException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
/**
 * Marker interface: exceptions implementing this indicate that the underlying
 * connection is no longer usable and should be discarded rather than reused.
 */
public interface IsDeadConnectionException {
}
| 6,084 |
0 | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool | Create_ds/dyno/dyno-core/src/main/java/com/netflix/dyno/connectionpool/exception/TimeoutException.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.connectionpool.exception;
/**
 * Operation timed out while talking to a host. Marked both retryable (the
 * operation may be attempted again) and dead-connection (the connection that
 * timed out should be discarded).
 */
public class TimeoutException extends DynoConnectException implements IsRetryableException, IsDeadConnectionException {
private static final long serialVersionUID = 5025308550262085866L;
/**
 * @param message description of the timed-out operation
 */
public TimeoutException(String message) {
super(message);
}
/**
 * @param t underlying cause
 */
public TimeoutException(Throwable t) {
super(t);
}
/**
 * @param message description of the timed-out operation
 * @param cause underlying cause
 */
public TimeoutException(String message, Throwable cause) {
super(message, cause);
}
}
0 | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes/lock/LocalRedisLockTest.java | package com.netflix.dyno.recipes.lock;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import redis.embedded.RedisServer;
import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
public class LocalRedisLockTest extends DynoLockClientTest {
private static final int REDIS_PORT = 8999;
private static final String REDIS_RACK = "rack-1c";
private static final String REDIS_DATACENTER = "rack-1";
private RedisServer redisServer;
@Before
public void setUp() throws IOException {
redisServer = new RedisServer(REDIS_PORT);
redisServer.start();
Assume.assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win"));
host = new HostBuilder()
.setHostname("localhost")
.setIpAddress("127.0.0.1")
.setDatastorePort(REDIS_PORT)
.setPort(REDIS_PORT)
.setRack(REDIS_RACK)
.setStatus(Host.Status.Up)
.createHost();
tokenMapSupplier = new TokenMapSupplierImpl(host);
dynoLockClient = constructDynoLockClient();
}
@After
public void tearDown() {
if (redisServer != null) {
redisServer.stop();
}
}
public DynoLockClient constructDynoLockClient() {
HostSupplier hostSupplier = () -> Collections.singletonList(host);
final ConnectionPoolConfigurationImpl connectionPoolConfiguration =
new ConnectionPoolConfigurationImpl(REDIS_RACK);
connectionPoolConfiguration.withTokenSupplier(tokenMapSupplier);
connectionPoolConfiguration.setLocalRack(REDIS_RACK);
connectionPoolConfiguration.setLocalDataCenter(REDIS_DATACENTER);
connectionPoolConfiguration.setConnectToDatastore(true);
return new DynoLockClient.Builder()
.withApplicationName("test")
.withDynomiteClusterName("testcluster")
.withHostSupplier(hostSupplier)
.withTokenMapSupplier(tokenMapSupplier)
.withTimeout(50)
.withConnectionPoolConfiguration(connectionPoolConfiguration)
.withTimeoutUnit(TimeUnit.MILLISECONDS)
.build();
}
}
| 6,086 |
0 | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes/lock/DynoLockClientTest.java | package com.netflix.dyno.recipes.lock;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.recipes.util.Tuple;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import java.util.stream.IntStream;
public abstract class DynoLockClientTest {
Host host;
TokenMapSupplierImpl tokenMapSupplier;
DynoLockClient dynoLockClient;
String resource = "testResource";
public abstract DynoLockClient constructDynoLockClient();
@After
public void releaseLock() {
dynoLockClient.releaseLock(resource);
}
@Test
public void testAcquireLockWithExtension() throws InterruptedException {
boolean acquireResult = dynoLockClient.acquireLock(resource, 500, (rsc) -> {});
Assert.assertTrue("Failed to acquire lock on resource", acquireResult);
Thread.sleep(3000);
Assert.assertTrue(dynoLockClient.checkLock(resource) > 0);
dynoLockClient.releaseLock(resource);
Assert.assertTrue(dynoLockClient.checkLock(resource) == 0);
}
@Test
public void testExtendLockAndCheckResourceExists() {
long v = dynoLockClient.acquireLock(resource, 500);
Assert.assertTrue("Acquire lock did not succeed in time", v > 0);
Assert.assertEquals(1, dynoLockClient.getLockedResources().size());
Assert.assertTrue(dynoLockClient.checkResourceExists(resource));
long ev = dynoLockClient.extendLock(resource, 1000);
Assert.assertTrue("Extend lock did not extend the lock", ev > 500);
dynoLockClient.releaseLock(resource);
Assert.assertEquals(0, dynoLockClient.getLockedResources().size());
}
@Test
public void testReleaseLock() {
long v = dynoLockClient.acquireLock(resource, 100);
Assert.assertTrue("Acquire lock did not succeed in time", v > 0);
dynoLockClient.releaseLock(resource);
v = dynoLockClient.checkLock(resource);
Assert.assertTrue("Release lock failed",v == 0);
}
@Test
public void testExtendLockFailsIfTooLate() throws InterruptedException {
long v = dynoLockClient.acquireLock(resource, 100);
Assert.assertTrue("Acquire lock did not succeed in time", v > 0);
Assert.assertEquals(1, dynoLockClient.getLockedResources().size());
Thread.sleep(100);
long ev = dynoLockClient.extendLock(resource, 1000);
Assert.assertTrue("Extend lock extended the lock even when late", ev == 0);
Assert.assertEquals(0, dynoLockClient.getLockedResources().size());
}
@Test
public void testCheckLock() {
long v = dynoLockClient.acquireLock(resource, 5000);
Assert.assertTrue("Acquire lock did not succeed in time", v > 0);
Assert.assertEquals(1, dynoLockClient.getLockedResources().size());
v = dynoLockClient.checkLock(resource);
Assert.assertTrue("Check lock failed for acquired lock",v > 0);
dynoLockClient.releaseLock(resource);
Assert.assertTrue("Check lock failed for acquired lock", dynoLockClient.checkLock(resource) == 0);
}
@Test
public void testLockClient() {
long v = dynoLockClient.acquireLock(resource, 1000);
Assert.assertTrue("Acquire lock did not succeed in time", v > 0);
Assert.assertEquals(1, dynoLockClient.getLockedResources().size());
dynoLockClient.releaseLock(resource);
Assert.assertEquals(0, dynoLockClient.getLockedResources().size());
}
@Test
public void testLockClientConcurrent() {
DynoLockClient[] cs = new DynoLockClient[] {constructDynoLockClient(), constructDynoLockClient(), constructDynoLockClient()};
CopyOnWriteArrayList<DynoLockClient> clients = new CopyOnWriteArrayList<>(cs);
List<Long> ttls = Arrays.asList(new Long[]{1000L, 500L, 250L});
AtomicInteger count = new AtomicInteger(3);
Collections.shuffle(ttls);
ConcurrentLinkedDeque<Long> ttlQueue = new ConcurrentLinkedDeque<>(ttls);
List<Long> resultList = Collections.synchronizedList(new ArrayList());
Supplier<Tuple<Long, Long>> acquireLock = () -> {
long ttl = ttlQueue.poll();
long value = clients.get(count.decrementAndGet()).acquireLock(resource, ttl);
resultList.add(value);
return new Tuple<>(ttl, value);
};
IntStream.range(0, ttls.size()).mapToObj(i -> CompletableFuture.supplyAsync(acquireLock)
.thenAccept(t -> Assert.assertTrue(t._2() < t._1()))).forEach(f -> {
try {
f.get();
} catch (InterruptedException e) {
Assert.fail("Interrupted during the test");
} catch (ExecutionException e) {
e.printStackTrace();
Assert.fail();
}
});
boolean lock = false;
for(Long r: resultList) {
if(r > 0) {
if(lock) {
Assert.fail("Lock did not work as expected " + Arrays.toString(resultList.toArray()));
}
lock = true;
}
}
}
static class TokenMapSupplierImpl implements TokenMapSupplier {
private final HostToken localHostToken;
TokenMapSupplierImpl(Host host) {
this.localHostToken = new HostToken(100000L, host);
}
@Override
public List<HostToken> getTokens(Set<Host> activeHosts) {
return Collections.singletonList(localHostToken);
}
@Override
public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
return localHostToken;
}
}
} | 6,087 |
0 | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes/lock/VotingHostsFromTokenRangeTest.java | package com.netflix.dyno.recipes.lock;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.lb.CircularList;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class VotingHostsFromTokenRangeTest {
private String r1 = "rack1";
private String r2 = "rack2";
private String r3 = "rack3";
private TokenMapSupplier tokenMapSupplier;
private HostSupplier hostSupplier;
private VotingHostsSelector votingHostsSelector;
private List<Host> hosts;
@Before
public void setUp() {
Host h1 = new HostBuilder().setHostname("h1").setRack(r1).setStatus(Host.Status.Up).createHost();
Host h2 = new HostBuilder().setHostname("h2").setRack(r1).setStatus(Host.Status.Up).createHost();
Host h3 = new HostBuilder().setHostname("h3").setRack(r2).setStatus(Host.Status.Up).createHost();
Host h4 = new HostBuilder().setHostname("h4").setRack(r2).setStatus(Host.Status.Up).createHost();
Host h5 = new HostBuilder().setHostname("h5").setRack(r2).setStatus(Host.Status.Up).createHost();
Host h6 = new HostBuilder().setHostname("h6").setRack(r3).setStatus(Host.Status.Up).createHost();
Host[] arr = {h1, h2, h3, h4, h5, h6};
hosts = Arrays.asList(arr);
final Map<Host, HostToken> tokenMap = new HashMap<>();
tokenMap.put(h1, new HostToken(1111L, h1));
tokenMap.put(h2, new HostToken(2222L, h2));
tokenMap.put(h3, new HostToken(1111L, h3));
tokenMap.put(h4, new HostToken(2222L, h4));
tokenMap.put(h5, new HostToken(3333L, h5));
tokenMap.put(h6, new HostToken(1111L, h6));
hostSupplier = () -> hosts;
tokenMapSupplier = new TokenMapSupplier() {
@Override
public List<HostToken> getTokens(Set<Host> activeHosts) {
return new ArrayList<>(tokenMap.values());
}
@Override
public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
return tokenMap.get(host);
}
};
}
private void testVotingSize(int votingSize) {
votingHostsSelector = new VotingHostsFromTokenRange(hostSupplier, tokenMapSupplier, votingSize);
CircularList<Host> hosts = votingHostsSelector.getVotingHosts();
Set<String> resultHosts = hosts.getEntireList().stream().map(h -> h.getHostName()).collect(Collectors.toSet());
Assert.assertEquals(votingSize, resultHosts.size());
Assert.assertEquals(votingSize,
hosts.getEntireList().subList(0, votingSize).stream().filter(h1 -> resultHosts.contains(h1.getHostName())).count());
}
@Test
public void getVotingSize() {
IntStream.range(1, 6).filter(i -> i%2 != 0).forEach(i -> testVotingSize(i));
}
} | 6,088 |
0 | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/test/java/com/netflix/dyno/recipes/counter/DistributedCounterTest.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.counter;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.netflix.dyno.connectionpool.TokenPoolTopology;
import com.netflix.dyno.connectionpool.TopologyView;
import com.netflix.dyno.jedis.DynoJedisClient;
/**
 * Unit tests for {@link DynoJedisCounter} key generation against a mocked
 * topology view.
 */
public class DistributedCounterTest {

    @Mock
    private DynoJedisClient client;

    @Mock
    private TopologyView topologyView;

    @Mock
    private TokenPoolTopology.TokenStatus token1;

    @Mock
    private TokenPoolTopology.TokenStatus token2;

    @Mock
    private TokenPoolTopology.TokenStatus token3;

    @Mock
    private TokenPoolTopology.TokenStatus token4;

    private Map<String, List<TokenPoolTopology.TokenStatus>> topology;

    @Before
    public void before() {
        MockitoAnnotations.initMocks(this);

        when(token1.getToken()).thenReturn(1383429731L);
        when(token2.getToken()).thenReturn(2457171554L);
        when(token3.getToken()).thenReturn(3530913377L);
        when(token4.getToken()).thenReturn(309687905L);

        List<TokenPoolTopology.TokenStatus> tokenStatusList = Arrays.asList(token1, token2, token3, token4);
        topology = new HashMap<>();
        topology.put("us-east-1c", tokenStatusList);

        // Map each generated key (suffixed _100.._400) onto one of the four ring tokens.
        when(topologyView.getTokenForKey(anyString())).thenAnswer(new Answer<Long>() {
            @Override
            public Long answer(InvocationOnMock invocation) throws Throwable {
                Object[] args = invocation.getArguments();
                String arg = args[0].toString();
                if (arg.endsWith("_100")) {
                    return token1.getToken();
                } else if (arg.endsWith("_200")) {
                    return token2.getToken();
                } else if (arg.endsWith("_300")) {
                    return token3.getToken();
                } else if (arg.endsWith("_400")) {
                    return token4.getToken();
                }
                return 0L;
            }
        });
    }

    /**
     * Test the behavior that finds the key that matches the tokens in the ring
     * <p>
     * Topology view from dynomite server node
     * <pre>
     * {
     *     us-east-1c:
     *     {
     *         1383429731 : [ ec2-54-226-81-202.compute-1.amazonaws.com, UP ],
     *         2457171554 : [ ec2-54-242-76-134.compute-1.amazonaws.com, UP ],
     *         3530913377 : [ ec2-54-221-36-52.compute-1.amazonaws.com, UP ]
     *         309687905  : [ ec2-54-167-87-164.compute-1.amazonaws.com, UP ]
     *     }
     *     .
     *     .
     *     .
     * }
     * </pre>
     */
    @Test
    public void testGenerateKeys() {
        when(client.getTopologyView()).thenReturn(topologyView);
        when(topologyView.getTopologySnapshot()).thenReturn(topology);

        DynoJedisCounter counter = new DynoJedisCounter("testCounter", client);
        List<String> keys = counter.generateKeys();
        // One key per token in the ring. assertEquals gives a useful failure
        // message, unlike the previous assertTrue(4 == keys.size()).
        Assert.assertEquals(4, keys.size());
    }
}
| 6,089 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/util/Tuple.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.recipes.util;
/**
 * Immutable pair of two values.
 *
 * @param <T1> type of the first element
 * @param <T2> type of the second element
 */
public class Tuple<T1, T2> {

    private final T1 a;
    private final T2 b;

    /**
     * @param a first element (may be null)
     * @param b second element (may be null)
     */
    public Tuple(T1 a, T2 b) {
        this.a = a;
        this.b = b;
    }

    /** @return the first element (may be null) */
    public T1 _1() {
        return a;
    }

    /** @return the second element (may be null) */
    public T2 _2() {
        return b;
    }

    @Override
    public int hashCode() {
        // Objects.hash produces exactly the previous hand-rolled value:
        // 31 * (31 * 1 + h(a)) + h(b), with h(null) == 0.
        return java.util.Objects.hash(a, b);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null || getClass() != obj.getClass()) return false;
        // Wildcard cast instead of the previous raw-type cast: elements are
        // compared via equals(), so the erased type parameters do not matter.
        Tuple<?, ?> other = (Tuple<?, ?>) obj;
        return java.util.Objects.equals(a, other.a) && java.util.Objects.equals(b, other.b);
    }

    /** Static factory mirroring the two-argument constructor. */
    public static <T1, T2> Tuple<T1, T2> as(T1 t1, T2 t2) {
        return new Tuple<T1, T2>(t1, t2);
    }
}
| 6,090 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/DynoLockClient.java | /*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.lock;
import com.netflix.discovery.EurekaClient;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.contrib.ArchaiusConnectionPoolConfiguration;
import com.netflix.dyno.contrib.DynoCPMonitor;
import com.netflix.dyno.contrib.DynoOPMonitor;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.jedis.DynoJedisUtils;
import com.netflix.dyno.recipes.lock.command.CheckAndRunHost;
import com.netflix.dyno.recipes.lock.command.ExtendHost;
import com.netflix.dyno.recipes.lock.command.LockHost;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
/**
* Client for acquiring locks similar to the redlock implementation https://redis.io/topics/distlock
*
* This locking mechanism does not give the guarantees for safety but can be used for efficiency.
*
* We assume some amount of clock skew between the client and server instances. Any major deviations in this
* will result in reduced accuracy of the lock.
*
* In the common locking case we rely on TTL's being set on a majority of redis servers in order to achieve the right
* locking characteristic. If we are unable to reach a majority of hosts when we try to acquire a lock or extend a lock.
*
* We try to release locks on all the hosts when we either shutdown or are unable to lock on a majority of hosts successfully
*
* These are the main edge cases where locking might not be mutually exclusive
*
* 1. An instance we acquired the lock on goes down and gets replaced by a new instance before the lock TTL expires.
* As suggested in the blog above, we need to ensure that new servers take longer than TTL time to come up so that
* any existing locks would've expired by then.(the client side code cannot control how quickly your servers come up). This can
* become a real issue if you're bringing up new servers in containers which can come up in a few seconds and you are holding locks for longer
* than the amount of time it takes for a new server to come up.
* 2. You have the JVM go into GC from when you acquired the lock to when you are going to modify the resource blocked by the lock.
* The client needs to ensure that GC is not happening for a long enough time that it can effect the assumption of lock being held (or have an alert on long GCs).
*
*/
public class DynoLockClient {
private static final Logger logger = LoggerFactory.getLogger(DynoJedisClient.class);
private final ConnectionPool pool;
private final VotingHostsSelector votingHostsSelector;
private final ExecutorService service;
private final int quorum;
// We assume a small amount of clock drift.
private final double CLOCK_DRIFT = 0.01;
private TimeUnit timeoutUnit;
private long timeout;
private final ConcurrentHashMap<String, String> resourceKeyMap = new ConcurrentHashMap<>();
/**
 * @param pool connection pool used to talk to the dynomite/redis hosts
 * @param votingHostsSelector supplies the fixed set of hosts that vote on each lock
 * @param timeout maximum time to wait for quorum responses
 * @param unit unit of {@code timeout}
 */
public DynoLockClient(ConnectionPool pool, VotingHostsSelector votingHostsSelector, long timeout, TimeUnit unit) {
this.pool = pool;
this.votingHostsSelector = votingHostsSelector;
// Threads for locking and unlocking
this.service = Executors.newCachedThreadPool();
// Majority of the voting hosts: a lock is held only with quorum agreement.
this.quorum = votingHostsSelector.getVotingSize() / 2 + 1;
this.timeout = timeout;
this.timeoutUnit = unit;
// We want to release all locks in case of a graceful shutdown
Runtime.getRuntime().addShutdownHook(new Thread(() -> cleanup()));
}
/** Sets the unit applied to the quorum-wait timeout. */
public void setTimeoutUnit(TimeUnit timeoutUnit) {
this.timeoutUnit = timeoutUnit;
}
/** Sets the maximum time (in the configured unit) to wait for quorum responses. */
public void setTimeout(long timeout) {
this.timeout = timeout;
}
/** @return a random UUID string used as this client's owner token for a lock key. */
private static String getRandomString() {
return UUID.randomUUID().toString();
}
/**
* Gets list of resources which are locked by the client
* @return
*/
public List<String> getLockedResources() {
// Snapshot copy so callers cannot mutate the internal map's key view.
return new ArrayList<>(resourceKeyMap.keySet());
}
/**
* Release the lock (if held) on the resource
* @param resource
*/
public void releaseLock(String resource) {
if (!checkResourceExists(resource)) {
logger.info("No lock held on {}", resource);
return;
}
// One latch count per voting host so we can wait (bounded by the client
// timeout) for every delete attempt, not just a quorum.
CountDownLatch latch = new CountDownLatch(votingHostsSelector.getVotingSize());
// "del" is executed only if the stored value still matches our owner token,
// so we never delete a lock acquired by another client.
votingHostsSelector.getVotingHosts().getEntireList().stream()
.map(host -> new CheckAndRunHost(host, pool, "del", resource, resourceKeyMap.get(resource)))
.forEach(ulH -> CompletableFuture.supplyAsync(ulH, service)
.thenAccept(result -> latch.countDown())
);
boolean latchValue = false;
try {
latchValue = latch.await(timeout, timeoutUnit);
} catch (InterruptedException e) {
// NOTE(review): the interrupt flag is swallowed here; consider re-interrupting.
logger.info("Interrupted while releasing the lock for resource {}", resource);
}
if (latchValue) {
logger.info("Released lock on {}", resource);
} else {
logger.info("Timed out before we could release the lock");
}
// Forget the lock locally even if some hosts did not confirm the delete;
// their copies will expire on their own when the TTL runs out.
resourceKeyMap.remove(resource);
}
/**
* Return a timer task which will recursively schedule itself when extension is successful.
* @param runJob
* @param resource
* @param ttlMS
* @param extensionFailed - This function gets called with the resource name when extension was unsuccessful.
* @return
*/
private TimerTask getExtensionTask(Timer runJob, String resource, long ttlMS, Consumer<String> extensionFailed) {
    return new TimerTask() {
        @Override
        public void run() {
            final long remaining = extendLock(resource, ttlMS);
            if (remaining <= 0) {
                // Extension did not succeed; hand the resource to the failure callback.
                extensionFailed.accept(resource);
                return;
            }
            logger.info("Extended lock on {} for {} MS", resource, ttlMS);
            // Re-arm the timer to fire again halfway through the new validity window.
            runJob.schedule(getExtensionTask(runJob, resource, ttlMS, extensionFailed), remaining / 2);
        }
    };
}
/**
* Try to acquire lock on resource for ttlMS and keep extending it by ttlMS when its about to expire.
* Calls the failure function with the resource when extension fails.
* @param resource The resource on which you want to acquire a lock
* @param ttlMS The amount of time for which we need to acquire the lock. We try to extend the lock every ttlMS / 2
* @param failure This function will be called with the resource which could not be locked. This function is called
* even if the client released the lock.
* @return returns true if we were able to successfully acqurie the lock.
*/
public boolean acquireLock(String resource, long ttlMS, Consumer<String> failure) {
    // On extension failure, drop the lock first and then notify the caller.
    Consumer<String> onExtensionFailure = lockedResource -> {
        releaseLock(lockedResource);
        failure.accept(lockedResource);
    };
    return acquireLockWithExtension(resource, ttlMS, onExtensionFailure);
}
/**
* Try to acquire the lock and schedule extension jobs recursively until extension fails.
* @param resource
* @param ttlMS
* @param extensionFailedCallback - gets called with the resource name when extension fails.
* @return
*/
private boolean acquireLockWithExtension(String resource, long ttlMS, Consumer<String> extensionFailedCallback) {
    final long validity = acquireLock(resource, ttlMS);
    if (validity <= 0) {
        return false;
    }
    // Daemon timer named after the resource; first extension fires halfway
    // through the granted validity window.
    Timer extensionTimer = new Timer(resource, true);
    extensionTimer.schedule(getExtensionTask(extensionTimer, resource, ttlMS, extensionFailedCallback), validity / 2);
    return true;
}
    /**
     * This function is used to acquire / extend the lock on at least quorum number of hosts.
     * Fans a lock/extend command out to every voting host asynchronously, waits (bounded by
     * the client timeout) for a quorum of successes, and computes the remaining validity.
     * @param resource resource to lock or extend
     * @param ttlMS requested lock duration in milliseconds
     * @param extend true to extend an existing lock, false to acquire a fresh one
     * @return remaining validity of the lock in milliseconds, or 0 if quorum was not reached
     */
    private long runLockHost(String resource, long ttlMS, boolean extend) {
        long startTime = Instant.now().toEpochMilli();
        // Clock-drift allowance subtracted from the validity (Redlock-style safety margin).
        long drift = Math.round(ttlMS * CLOCK_DRIFT) + 2;
        LockResource lockResource = new LockResource(resource, ttlMS);
        // Each successful host counts down once; we proceed once `quorum` hosts answered OK.
        CountDownLatch latch = new CountDownLatch(quorum);
        if (extend) {
            votingHostsSelector.getVotingHosts().getEntireList().stream()
                    .map(host -> new ExtendHost(host, pool, lockResource, latch, resourceKeyMap.get(resource)))
                    .forEach(lH -> CompletableFuture.supplyAsync(lH, service));
        } else {
            votingHostsSelector.getVotingHosts().getEntireList().stream()
                    .map(host -> new LockHost(host, pool, lockResource, latch, resourceKeyMap.get(resource)))
                    .forEach(lH -> CompletableFuture.supplyAsync(lH, service));
        }
        // Result intentionally ignored: the quorum re-check below covers the timeout case too.
        awaitLatch(latch, resource);
        long validity = 0L;
        if (lockResource.getLocked() >= quorum) {
            // Validity shrinks by the time spent talking to hosts plus the drift margin.
            long timeElapsed = Instant.now().toEpochMilli() - startTime;
            validity = ttlMS - timeElapsed - drift;
        } else {
            // Quorum not reached: undo any partial locks we may have taken.
            releaseLock(resource);
        }
        return validity;
    }
/**
* Tries to acquire lock on resource for ttlMS milliseconds. Returns the amount of time for which the lock was acquired
* @param resource
* @param ttlMS
* @return
*/
public long acquireLock(String resource, long ttlMS) {
resourceKeyMap.putIfAbsent(resource, getRandomString());
return runLockHost(resource, ttlMS, false);
}
/**
* Check if we still have the lock on a resource
* @param resource
* @return
*/
boolean checkResourceExists(String resource) {
if (!resourceKeyMap.containsKey(resource)) {
logger.info("No lock held on {}", resource);
return false;
} else {
return true;
}
}
private boolean awaitLatch(CountDownLatch latch, String resource) {
try {
return latch.await(timeout, timeoutUnit);
} catch (InterruptedException e) {
logger.info("Interrupted while checking the lock for resource {}", resource);
return false;
}
}
    /**
     * Check and get the ttls for the lock if it exists. This returns the minimum of ttls returned across the hosts,
     * minus the time spent performing the check (a conservative remaining validity).
     * @param resource resource whose lock is being checked
     * @return remaining validity in milliseconds, or 0 if the lock is not held or quorum was not reached in time
     */
    public long checkLock(String resource) {
        if (!checkResourceExists(resource)) {
            return 0;
        } else {
            long startTime = Instant.now().toEpochMilli();
            // Thread-safe accumulator: thenAccept callbacks run on executor threads.
            CopyOnWriteArrayList<Long> resultTtls = new CopyOnWriteArrayList<>();
            CountDownLatch latch = new CountDownLatch(quorum);
            // Ask every voting host for the remaining pttl; only hosts that still hold
            // our random owner key count towards the quorum.
            votingHostsSelector.getVotingHosts().getEntireList().stream()
                    .map(host -> new CheckAndRunHost(host, pool, "pttl", resource, resourceKeyMap.get(resource)))
                    .forEach(checkAndRunHost -> CompletableFuture.supplyAsync(checkAndRunHost, service)
                            .thenAccept(r -> {
                                String result = r.getResult().toString();
                                // The lua script returns 0 if we have lost the lock or we get -2 if the ttl expired on
                                // the key when we checked for the pttl.
                                if (result.equals("0") || result.equals("-2")) {
                                    logger.info("Lock not present on host");
                                } else {
                                    // Every countDown() is paired with an add, so once the latch
                                    // opens resultTtls holds at least `quorum` entries.
                                    resultTtls.add(Long.valueOf(result));
                                    latch.countDown();
                                }
                            })
                    );
            boolean latchValue = awaitLatch(latch, resource);
            if (latchValue) {
                long timeElapsed = Instant.now().toEpochMilli() - startTime;
                logger.info("Checked lock on {}", resource);
                // Pessimistic: use the smallest ttl any quorum member reported.
                return Collections.min(resultTtls) - timeElapsed;
            } else {
                logger.info("Timed out before we could check the lock");
                return 0;
            }
        }
    }
/**
* Try to extend lock by ttlMS
* @param resource
* @param ttlMS
* @return
*/
public long extendLock(String resource, long ttlMS) {
if (!checkResourceExists(resource)) {
logger.info("Could not extend lock since its already released");
return 0;
} else {
return runLockHost(resource, ttlMS, true);
}
}
/**
* Release all locks to clean.
*/
public void cleanup() {
resourceKeyMap.keySet().stream().forEach(this::releaseLock);
}
/**
* Log all the lock resources held right now.
*/
public void logLocks() {
resourceKeyMap.entrySet().stream()
.forEach(e -> logger.info("Resource: {}, Key: {}", e.getKey(), e.getValue()));
}
public static class Builder {
private String appName;
private String clusterName;
private TokenMapSupplier tokenMapSupplier;
private HostSupplier hostSupplier;
private ConnectionPoolConfigurationImpl cpConfig;
private EurekaClient eurekaClient;
private long timeout;
private TimeUnit timeoutUnit;
public Builder() {
}
public Builder withTimeout(long timeout) {
this.timeout = timeout;
return this;
}
public Builder withTimeoutUnit(TimeUnit unit) {
this.timeoutUnit = unit;
return this;
}
public Builder withEurekaClient(EurekaClient eurekaClient) {
this.eurekaClient = eurekaClient;
return this;
}
public Builder withApplicationName(String applicationName) {
appName = applicationName;
return this;
}
public Builder withDynomiteClusterName(String cluster) {
clusterName = cluster;
return this;
}
public Builder withHostSupplier(HostSupplier hSupplier) {
hostSupplier = hSupplier;
return this;
}
public Builder withTokenMapSupplier(TokenMapSupplier tokenMapSupplier) {
this.tokenMapSupplier = tokenMapSupplier;
return this;
}
public Builder withConnectionPoolConfiguration(ConnectionPoolConfigurationImpl cpConfig) {
this.cpConfig = cpConfig;
return this;
}
public DynoLockClient build() {
assert (appName != null);
assert (clusterName != null);
if (cpConfig == null) {
cpConfig = new ArchaiusConnectionPoolConfiguration(appName);
logger.info("Dyno Client runtime properties: " + cpConfig.toString());
}
// We do not want to fallback to other azs which is the normal opertion for the connection pool
cpConfig.setFallbackEnabled(false);
cpConfig.setConnectToDatastore(true);
return buildDynoLockClient();
}
private DynoLockClient buildDynoLockClient() {
DynoOPMonitor opMonitor = new DynoOPMonitor(appName);
ConnectionPoolMonitor cpMonitor = new DynoCPMonitor(appName);
DynoJedisUtils.updateConnectionPoolConfig(cpConfig, hostSupplier, tokenMapSupplier, eurekaClient,
clusterName);
if (tokenMapSupplier == null)
tokenMapSupplier = cpConfig.getTokenSupplier();
final ConnectionPool<Jedis> pool = DynoJedisUtils.createConnectionPool(appName, opMonitor, cpMonitor,
cpConfig, null);
VotingHostsFromTokenRange votingHostSelector = new VotingHostsFromTokenRange(hostSupplier, tokenMapSupplier,
cpConfig.getLockVotingSize());
return new DynoLockClient(pool, votingHostSelector, timeout, timeoutUnit);
}
}
}
| 6,091 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/VotingHostsSelector.java | package com.netflix.dyno.recipes.lock;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.impl.lb.CircularList;
public interface VotingHostsSelector {
    /**
     * Get the list of hosts eligible for voting.
     * @return circular list over the deterministic set of voting hosts
     */
    CircularList<Host> getVotingHosts();
    /**
     * Returns the number of voting hosts.
     * @return the number of hosts actually selected for voting
     */
    int getVotingSize();
}
| 6,092 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/VotingHostsFromTokenRange.java | package com.netflix.dyno.recipes.lock;
import com.google.common.collect.ImmutableSet;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.lb.CircularList;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * This class deterministically returns a list of hosts which will be used for voting. We use the TokenRange to get the
 * same set of hosts from all clients: racks are visited in sorted name order and hosts within a rack in token order,
 * so every client derives an identical voting pool.
 */
public class VotingHostsFromTokenRange implements VotingHostsSelector {
    private final TokenMapSupplier tokenMapSupplier;
    private final HostSupplier hostSupplier;
    // Lazily populated on the first getVotingHosts() call (triggered from the constructor).
    private final CircularList<Host> votingHosts = new CircularList<>(new ArrayList<>());
    private final int MIN_VOTING_SIZE = 1;
    private final int MAX_VOTING_SIZE = 5;
    // Desired pool size; a requested size of -1 means "use MAX_VOTING_SIZE".
    private final int effectiveVotingSize;
    // Number of hosts actually selected (may be capped by cluster size).
    private final AtomicInteger calculatedVotingSize = new AtomicInteger(0);
    /**
     * @param hostSupplier source of the current cluster hosts
     * @param tokenMapSupplier maps hosts to their tokens (used for deterministic ordering)
     * @param votingSize requested number of voters, must be odd; -1 selects the maximum (5)
     * @throws IllegalStateException if the requested size is even or the pool cannot be built
     */
    public VotingHostsFromTokenRange(HostSupplier hostSupplier, TokenMapSupplier tokenMapSupplier, int votingSize) {
        this.tokenMapSupplier = tokenMapSupplier;
        this.hostSupplier = hostSupplier;
        effectiveVotingSize = votingSize == -1 ? MAX_VOTING_SIZE : votingSize;
        if(votingSize % 2 == 0) {
            throw new IllegalStateException("Cannot perform voting with even number of hosts");
        }
        // Eagerly build the pool so construction fails fast on an invalid topology.
        getVotingHosts();
    }
    @Override
    public CircularList<Host> getVotingHosts() {
        if (votingHosts.getSize() == 0) {
            if(effectiveVotingSize % 2 == 0) {
                throw new IllegalStateException("Cannot do voting with even number of nodes for voting");
            }
            List<HostToken> allHostTokens = tokenMapSupplier.getTokens(ImmutableSet.copyOf(hostSupplier.getHosts()));
            if (allHostTokens.size() < MIN_VOTING_SIZE) {
                throw new IllegalStateException(String.format("Cannot perform voting with less than %d nodes", MIN_VOTING_SIZE));
            }
            // Total number of hosts present per rack
            Map<String, Long> numHostsPerRack = allHostTokens.stream().map(ht -> ht.getHost().getRack()).collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
            AtomicInteger numHostsRequired = new AtomicInteger(effectiveVotingSize);
            // Map to keep track of number of hosts to take for voting from this rack
            Map<String, Integer> numHosts = new HashMap<>();
            // Sort racks to get the same order
            List<String> racks = numHostsPerRack.keySet().stream().sorted(Comparator.comparing(String::toString)).collect(Collectors.toList());
            for(String rack: racks) {
                // Take as many hosts as you can from this rack.
                int v = (int) Math.min(numHostsRequired.get(), numHostsPerRack.get(rack));
                numHostsRequired.addAndGet(-v);
                numHosts.put(rack, v);
                calculatedVotingSize.addAndGet(v);
            }
            // An even pool can deadlock a vote, so refuse to build one.
            if(calculatedVotingSize.get() % 2 == 0) {
                throw new IllegalStateException("Could not construct voting pool. Min number of hosts not met!");
            }
            Map<String, List<HostToken>> rackToHostToken = allHostTokens.stream()
                    .collect(Collectors.groupingBy(ht -> ht.getHost().getRack()));
            // Get the final list of voting hosts
            List<Host> finalVotingHosts = rackToHostToken.entrySet().stream()
                    // Sorting on token to get hosts deterministically.
                    .sorted(Comparator.comparing(Map.Entry::getKey))
                    .flatMap(e -> {
                        List<HostToken> temp = e.getValue();
                        temp.sort(HostToken::compareTo);
                        // Take the first N tokens of this rack, N decided in the loop above.
                        return temp.subList(0, numHosts.get(e.getKey())).stream();
                    })
                    .map(ht -> ht.getHost())
                    .collect(Collectors.toList());
            votingHosts.swapWithList(finalVotingHosts);
        }
        return votingHosts;
    }
    @Override
    public int getVotingSize() {
        return calculatedVotingSize.get();
    }
}
| 6,093 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/LockResource.java | package com.netflix.dyno.recipes.lock;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Immutable description of a lock request (resource name + requested TTL) together
 * with a thread-safe counter of how many hosts have granted the lock so far.
 */
public class LockResource {
    /** Name of the resource being locked. */
    private final String resource;
    /** Requested lock time-to-live, in milliseconds. */
    private final long ttlMs;
    /** Number of hosts that have acknowledged this lock; starts at zero. */
    private final AtomicInteger locked = new AtomicInteger(0);

    public LockResource(String resource, long ttlMs) {
        this.resource = resource;
        this.ttlMs = ttlMs;
    }

    /** @return the resource name this lock targets */
    public String getResource() {
        return resource;
    }

    /** @return the requested TTL in milliseconds */
    public long getTtlMs() {
        return ttlMs;
    }

    /** @return how many hosts have granted the lock so far */
    public int getLocked() {
        return locked.get();
    }

    /** Records one more grant and returns the updated count. */
    public int incrementLocked() {
        return locked.incrementAndGet();
    }
}
| 6,094 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/command/CommandHost.java | package com.netflix.dyno.recipes.lock.command;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.OperationResult;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
/**
 * This class is used to handle the host connection startup and cleanup.
 * All non abstract subclasses should implement the supplier operation.
 * @param <T> result type produced by the concrete command
 */
public abstract class CommandHost<T> implements Supplier<OperationResult<T>> {
    private final Host host;
    private final ConnectionPool pool;

    public CommandHost(Host host, ConnectionPool pool) {
        this.host = host;
        this.pool = pool;
    }

    /**
     * Borrows a connection to this command's host from the shared pool, waiting at most
     * the pool's configured max-timeout-when-exhausted.
     */
    public Connection getConnection() {
        long maxWaitMs = pool.getConfiguration().getMaxTimeoutWhenExhausted();
        return pool.getHostPool(host).borrowConnection(maxWaitMs, TimeUnit.MILLISECONDS);
    }

    /**
     * Resets the connection's context and returns it to its owning host pool.
     */
    public void cleanConnection(Connection connection) {
        connection.getContext().reset();
        connection.getParentConnectionPool().returnConnection(connection);
    }
}
| 6,095 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/command/LockHost.java | package com.netflix.dyno.recipes.lock.command;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionContext;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.jedis.OpName;
import com.netflix.dyno.jedis.operation.BaseKeyOperation;
import com.netflix.dyno.recipes.lock.LockResource;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.params.SetParams;
import java.util.concurrent.CountDownLatch;
/**
 * This class is used to acquire the lock on a host with a resource.
 * Issues a Redis {@code SET key value NX PX ttl}; on success it bumps the shared
 * {@link LockResource} grant counter and counts down the quorum latch.
 */
public class LockHost extends CommandHost<LockResource> {
    /** Key being locked (the resource name). */
    private final String value;
    /** Shared per-attempt state: TTL and grant counter. */
    private final LockResource lockResource;
    /** SET options: NX (only if absent) + PX ttl. */
    private final SetParams params;
    /** Random owner token proving this client holds the lock. */
    private final String randomKey;
    /** Quorum latch counted down on a successful grant. */
    private final CountDownLatch latch;

    public LockHost(Host host, ConnectionPool pool, LockResource lockResource, CountDownLatch latch, String randomKey) {
        super(host, pool);
        this.lockResource = lockResource;
        this.value = lockResource.getResource();
        this.params = SetParams.setParams().nx().px(lockResource.getTtlMs());
        this.randomKey = randomKey;
        this.latch = latch;
    }

    @Override
    public OperationResult<LockResource> get() {
        Connection connection = getConnection();
        try {
            // Typed operation result (the original used a raw OperationResult).
            return connection.execute(new BaseKeyOperation<LockResource>(value, OpName.SET) {
                @Override
                public LockResource execute(Jedis client, ConnectionContext state) {
                    String reply = client.set(value, randomKey, params);
                    // Null-safe comparison; SET NX returns null when the key already exists.
                    if ("OK".equals(reply)) {
                        lockResource.incrementLocked();
                        latch.countDown();
                    }
                    return lockResource;
                }
            });
        } finally {
            // Always return the connection to the pool, even if execute() throws;
            // the original leaked the connection on failure.
            cleanConnection(connection);
        }
    }
}
| 6,096 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/command/ExtendHost.java | package com.netflix.dyno.recipes.lock.command;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionContext;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.jedis.OpName;
import com.netflix.dyno.jedis.operation.BaseKeyOperation;
import com.netflix.dyno.recipes.lock.LockResource;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.params.SetParams;
import java.util.concurrent.CountDownLatch;
/**
 * Instances of this class should be used to perform extend operations on an acquired lock.
 * Runs a Lua script that refreshes the TTL only if the stored value still matches our
 * random owner token; on success it bumps the grant counter and counts down the latch.
 */
public class ExtendHost extends CommandHost<LockResource> {
    // Lua: extend only when we still own the key (value equals our random token).
    private static final String cmdScript = " if redis.call(\"get\",KEYS[1]) == ARGV[1] then\n" +
            " return redis.call(\"set\",KEYS[1], ARGV[1], \"px\", ARGV[2])" +
            " else\n" +
            " return 0\n" +
            " end";
    /** Shared per-attempt state: TTL and grant counter. */
    private final LockResource lockResource;
    /** Key being extended (the resource name). */
    private final String value;
    /** Random owner token proving this client holds the lock. */
    private final String randomKey;
    /** Quorum latch counted down on a successful extension. */
    private final CountDownLatch latch;

    public ExtendHost(Host host, ConnectionPool pool, LockResource lockResource, CountDownLatch latch, String randomKey) {
        super(host, pool);
        this.lockResource = lockResource;
        this.value = lockResource.getResource();
        // NOTE: the previous version also built an unused SetParams field here; the
        // TTL is passed to the Lua script directly, so that field has been removed.
        this.randomKey = randomKey;
        this.latch = latch;
    }

    @Override
    public OperationResult<LockResource> get() {
        Connection connection = getConnection();
        try {
            return connection.execute(new BaseKeyOperation<Object>(randomKey, OpName.EVAL) {
                @Override
                public LockResource execute(Jedis client, ConnectionContext state) {
                    // We need to recheck randomKey in case it got removed before we get here.
                    if (randomKey == null) {
                        throw new IllegalStateException("Cannot extend lock with null value for key");
                    }
                    String result = client.eval(cmdScript, 1, value, randomKey, String.valueOf(lockResource.getTtlMs()))
                            .toString();
                    // The script returns the SET reply ("OK") on success, 0 otherwise.
                    if (result.equals("OK")) {
                        lockResource.incrementLocked();
                        latch.countDown();
                    }
                    return lockResource;
                }
            });
        } finally {
            // Always return the connection, even when execute() throws (previously leaked).
            cleanConnection(connection);
        }
    }
}
| 6,097 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/lock/command/CheckAndRunHost.java | package com.netflix.dyno.recipes.lock.command;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionContext;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.jedis.OpName;
import com.netflix.dyno.jedis.operation.BaseKeyOperation;
import redis.clients.jedis.Jedis;
/**
 * Runs a command against the host and is used to remove the lock and checking the ttl on the resource.
 * The Lua script executes the given command (e.g. {@code pttl} or {@code del}) only if the
 * stored value still matches our random owner token; otherwise it returns 0.
 */
public class CheckAndRunHost extends CommandHost<Object> {
    // %s is substituted with the Redis command to run while we still own the key.
    private static final String cmdScript = " if redis.call(\"get\",KEYS[1]) == ARGV[1] then\n" +
            " return redis.call(\"%s\",KEYS[1])\n" +
            " else\n" +
            " return 0\n" +
            " end";
    /** Key the command operates on (the resource name). */
    private final String resource;
    /** Random owner token proving this client holds the lock. */
    private final String randomKey;
    /** Fully-formatted Lua script with the command substituted in. */
    private final String command;

    public CheckAndRunHost(Host host, ConnectionPool pool, String command, String resource, String randomKey) {
        super(host, pool);
        this.command = String.format(cmdScript, command);
        this.resource = resource;
        this.randomKey = randomKey;
    }

    @Override
    public OperationResult<Object> get() {
        Connection connection = getConnection();
        try {
            // Typed operation result (the original used a raw OperationResult).
            return connection.execute(new BaseKeyOperation<Object>(randomKey, OpName.EVAL) {
                @Override
                public Object execute(Jedis client, ConnectionContext state) {
                    if (randomKey == null) {
                        // Message fixed: previously copy-pasted from ExtendHost
                        // ("Cannot extend lock..."), which misdescribed this operation.
                        throw new IllegalStateException("Cannot run lock check with null value for key");
                    }
                    return client.eval(command, 1, resource, randomKey);
                }
            });
        } finally {
            // Always return the connection, even when execute() throws (previously leaked).
            cleanConnection(connection);
        }
    }
}
| 6,098 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/json/DynoJedisJsonClient.java | /*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.json;
import com.google.gson.Gson;
import com.netflix.dyno.connectionpool.ConnectionContext;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.jedis.JedisGenericOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.util.SafeEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Thin wrapper over {@link DynoJedisClient} exposing ReJSON module commands
 * (JSON.SET / JSON.GET / JSON.DEL / JSON.TYPE / JSON.MGET and the array/object helpers).
 * Values are serialized to and from JSON with Gson.
 */
public class DynoJedisJsonClient {
    private static final Logger LOG = LoggerFactory.getLogger(DynoJedisJsonClient.class);
    // Gson is thread-safe; shared as a static constant.
    private static final Gson gson = new Gson();
    private final DynoJedisClient client;

    public DynoJedisJsonClient(DynoJedisClient client) {
        this.client = client;
    }

    /** JSON.SET on the root path with default existence semantics. */
    public OperationResult<String> set(String key, Object object) {
        return set(key, JsonPath.ROOT_PATH, object, ExistenceModifier.DEFAULT);
    }

    /** JSON.SET on the root path with an explicit NX/XX modifier. */
    public OperationResult<String> set(String key, ExistenceModifier flag, Object object) {
        return set(key, JsonPath.ROOT_PATH, object, flag);
    }

    /** JSON.SET at a specific path with default existence semantics. */
    public OperationResult<String> set(String key, JsonPath path, Object object) {
        return set(key, path, object, ExistenceModifier.DEFAULT);
    }

    /**
     * JSON.SET: stores {@code object} (Gson-serialized) at {@code path} under {@code key}.
     * @param flag NX/XX existence modifier; DEFAULT sends no modifier
     * @return the Redis status reply (e.g. "OK")
     */
    public OperationResult<String> set(String key, JsonPath path, Object object, ExistenceModifier flag) {
        // BUGFIX: the operation was previously registered under JsonCommand.GET,
        // mislabeling every SET call in op monitoring.
        return client.moduleCommand(new JedisGenericOperation<String>(key, JsonCommand.SET.toString()) {
            @Override
            public String execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                args.add(SafeEncoder.encode(gson.toJson(object)));
                if (flag != ExistenceModifier.DEFAULT) {
                    args.add(flag.getRaw());
                }
                client.getClient().sendCommand(JsonCommand.SET, args.toArray(new byte[args.size()][]));
                return client.getClient().getStatusCodeReply();
            }
        });
    }

    /** JSON.GET on the root path. */
    public OperationResult<Object> get(String key) {
        return get(key, JsonPath.ROOT_PATH);
    }

    /**
     * JSON.GET: fetches the value(s) at the given path(s), deserialized via Gson.
     */
    public OperationResult<Object> get(String key, JsonPath... paths) {
        return client.moduleCommand(new JedisGenericOperation<Object>(key, JsonCommand.GET.toString()) {
            @Override
            public Object execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                Arrays.stream(paths).forEach(x -> args.add(SafeEncoder.encode(x.toString())));
                client.getClient().sendCommand(JsonCommand.GET, args.toArray(new byte[args.size()][]));
                return gson.fromJson(client.getClient().getBulkReply(), Object.class);
            }
        });
    }

    /** JSON.DEL on the root path (removes the whole document). */
    public OperationResult<Long> del(String key) {
        return this.del(key, JsonPath.ROOT_PATH);
    }

    /**
     * JSON.DEL: deletes the value at {@code path}.
     * @return number of paths deleted
     */
    public OperationResult<Long> del(String key, JsonPath path) {
        return client.moduleCommand(new JedisGenericOperation<Long>(key, JsonCommand.DEL.toString()) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                client.getClient().sendCommand(JsonCommand.DEL, args.toArray(new byte[args.size()][]));
                return client.getClient().getIntegerReply();
            }
        });
    }

    /** JSON.TYPE on the root path. */
    public OperationResult<Class<?>> type(String key) {
        return this.type(key, JsonPath.ROOT_PATH);
    }

    /**
     * JSON.TYPE: reports the JSON type at {@code path}, mapped to a Java class
     * (null for JSON null, {@code boolean.class}, {@code int.class}, {@code float.class},
     * {@code String.class}, {@code Object.class} for objects, {@code List.class} for arrays).
     * @throws RuntimeException on an unrecognized type reply
     */
    public OperationResult<Class<?>> type(String key, JsonPath path) {
        // BUGFIX: the operation was previously registered under JsonCommand.DEL,
        // mislabeling TYPE calls in op monitoring.
        return client.moduleCommand(new JedisGenericOperation<Class<?>>(key, JsonCommand.TYPE.toString()) {
            @Override
            public Class<?> execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                client.getClient().sendCommand(JsonCommand.TYPE, args.toArray(new byte[args.size()][]));
                final String reply = client.getClient().getBulkReply();
                switch (reply) {
                    case "null":
                        return null;
                    case "boolean":
                        return boolean.class;
                    case "integer":
                        return int.class;
                    case "number":
                        return float.class;
                    case "string":
                        return String.class;
                    case "object":
                        return Object.class;
                    case "array":
                        return List.class;
                    default:
                        throw new java.lang.RuntimeException(reply);
                }
            }
        });
    }

    /**
     * JSON.MGET: fetches the value at {@code path} from each key, Gson-deserialized,
     * in the same order as {@code keys}.
     */
    public OperationResult<List<Object>> mget(List<String> keys, JsonPath path) {
        return client.moduleCommand(new JedisGenericOperation<List<Object>>("", JsonCommand.MGET.toString()) {
            @Override
            public List<Object> execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                keys.forEach(key -> args.add(SafeEncoder.encode(key)));
                args.add(SafeEncoder.encode(path.toString()));
                client.getClient().sendCommand(JsonCommand.MGET, args.toArray(new byte[args.size()][]));
                final List<String> reply = client.getClient().getMultiBulkReply();
                final List<Object> response = new ArrayList<>(reply.size());
                reply.forEach(r -> response.add(gson.fromJson(r, Object.class)));
                return response;
            }
        });
    }

    /**
     * JSON.ARRAPPEND: appends the Gson-serialized items to the array at {@code path}.
     * @return new array length
     */
    public OperationResult<Long> arrappend(String key, JsonPath path, Object... items) {
        return client.moduleCommand(new JedisGenericOperation<Long>(key, JsonCommand.ARRAPPEND.toString()) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                Arrays.asList(items)
                        .forEach(i -> args.add(SafeEncoder.encode(gson.toJson(i))));
                client.getClient().sendCommand(JsonCommand.ARRAPPEND, args.toArray(new byte[args.size()][]));
                return client.getClient().getIntegerReply();
            }
        });
    }

    /**
     * JSON.ARRINSERT: inserts the Gson-serialized items before {@code index} in the
     * array at {@code path}.
     * @return new array length
     */
    public OperationResult<Long> arrinsert(String key, JsonPath path, int index, Object... items) {
        return client.moduleCommand(new JedisGenericOperation<Long>(key, JsonCommand.ARRINSERT.toString()) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                args.add(SafeEncoder.encode(Integer.toString(index)));
                Arrays.asList(items)
                        .forEach(i -> args.add(SafeEncoder.encode(gson.toJson(i))));
                client.getClient().sendCommand(JsonCommand.ARRINSERT, args.toArray(new byte[args.size()][]));
                return client.getClient().getIntegerReply();
            }
        });
    }

    /** JSON.ARRLEN: length of the array at {@code path}. */
    public OperationResult<Long> arrlen(String key, JsonPath path) {
        return client.moduleCommand(new JedisGenericOperation<Long>(key, JsonCommand.ARRLEN.toString()) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                client.getClient().sendCommand(JsonCommand.ARRLEN, args.toArray(new byte[args.size()][]));
                return client.getClient().getIntegerReply();
            }
        });
    }

    /** JSON.OBJKEYS on the root path. */
    public OperationResult<List<String>> objkeys(String key) {
        return objkeys(key, JsonPath.ROOT_PATH);
    }

    /** JSON.OBJKEYS: field names of the object at {@code path}. */
    public OperationResult<List<String>> objkeys(String key, JsonPath path) {
        return client.moduleCommand(new JedisGenericOperation<List<String>>(key, JsonCommand.OBJKEYS.toString()) {
            @Override
            public List<String> execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                client.getClient().sendCommand(JsonCommand.OBJKEYS, args.toArray(new byte[args.size()][]));
                return client.getClient().getMultiBulkReply();
            }
        });
    }

    /** JSON.OBJLEN on the root path. */
    public OperationResult<Long> objlen(String key) {
        return objlen(key, JsonPath.ROOT_PATH);
    }

    /** JSON.OBJLEN: number of fields in the object at {@code path}. */
    public OperationResult<Long> objlen(String key, JsonPath path) {
        return client.moduleCommand(new JedisGenericOperation<Long>(key, JsonCommand.OBJLEN.toString()) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) throws DynoException {
                final List<byte[]> args = new ArrayList<>();
                args.add(SafeEncoder.encode(key));
                args.add(SafeEncoder.encode(path.toString()));
                client.getClient().sendCommand(JsonCommand.OBJLEN, args.toArray(new byte[args.size()][]));
                return client.getClient().getIntegerReply();
            }
        });
    }
}
| 6,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.