index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/dynomite/DynomiteStandardTuner.java | package com.netflix.dynomitemanager.dynomite;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.storage.StorageProxy;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.identity.IInstanceState;
import com.netflix.nfsidecar.identity.InstanceIdentity;
import com.netflix.nfsidecar.instance.InstanceDataRetriever;
import com.netflix.nfsidecar.resources.env.IEnvVariables;
import com.netflix.nfsidecar.utils.ProcessTuner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
@Singleton
public class DynomiteStandardTuner implements ProcessTuner {
    private static final Logger logger = LoggerFactory.getLogger(DynomiteStandardTuner.class);

    /** Root key under which all dynomite settings live in the YAML file. */
    private static final String ROOT_NAME = "dyn_o_mite";

    /** 2 GB expressed in KB. */
    public static final long GB_2_IN_KB = 2L * 1024L * 1024L;

    protected final FloridaConfig floridaConfig;
    protected final CommonConfig commonConfig;
    protected final InstanceIdentity ii;
    protected final StorageProxy storageProxy;
    protected final IInstanceState instanceState;
    protected final IEnvVariables envVariables;
    protected final InstanceDataRetriever instanceDataRetriever;

    /** Extracts the total memory value (in KB) from /proc/meminfo style output. */
    public static final Pattern MEMINFO_PATTERN = Pattern.compile("MemTotal:\\s*([0-9]*)");

    @Inject
    public DynomiteStandardTuner(FloridaConfig floridaConfig, CommonConfig commonConfig, InstanceIdentity ii,
            IInstanceState instanceState, StorageProxy storageProxy, IEnvVariables envVariables,
            InstanceDataRetriever instanceDataRetriever) {
        this.floridaConfig = floridaConfig;
        this.commonConfig = commonConfig;
        this.ii = ii;
        this.instanceState = instanceState;
        this.storageProxy = storageProxy;
        this.envVariables = envVariables;
        this.instanceDataRetriever = instanceDataRetriever;
    }

    /**
     * This is a wrapper around the FP of the max allocated messages. When the fast
     * property is 0 (unset), the limit is derived from the instance type. Message
     * allocation is based on the instance type: 2GB for Florida + 85% for Redis.
     *
     * @return the maximum number of allocated messages for dynomite
     */
    public int setMaxMsgs() {
        int configured = floridaConfig.getDynomiteMaxAllocatedMessages();
        if (configured != 0) {
            return configured;
        }
        String instanceType = this.instanceDataRetriever.getInstanceType();
        // Note: the branches are mutually exclusive — e.g. "r3.2xlarge" does not
        // contain the substring ".xlarge".
        if (instanceType.contains(".xlarge")) {
            // r3.xlarge: 30.5GB RAM (2.5GB available)
            logger.info("Instance Type: {} --->  Max Msgs: {}", instanceType, 100000);
            return 100000;
        } else if (instanceType.contains(".2xlarge")) {
            // r3.2xlarge: 61GB RAM (7.15GB available)
            logger.info("Instance Type: {} --->  Max Msgs: {}", instanceType, 300000);
            return 300000;
        } else if (instanceType.contains(".4xlarge")) {
            // r3.4xlarge: 122GB RAM (16.3GB available)
            logger.info("Instance Type: {} --->  Max Msgs: {}", instanceType, 800000);
            return 800000;
        } else if (instanceType.contains(".8xlarge")) {
            // r3.8xlarge: 244GB RAM (34.19GB available)
            logger.info("Instance Type: {} --->  Max Msgs: {}", instanceType, 1000000);
            return 1000000;
        } else {
            // unknown instance type: fall back to a middle-of-the-road default
            return 500000;
        }
    }

    /**
     * Regenerates the dynomite YAML configuration at {@code yamlLocation} from the
     * current configuration, identity and storage state, rewriting the file in
     * place. We want to throw the exception for a higher layer to handle it.
     *
     * @param yamlLocation path of the dynomite.yml file to rewrite
     * @throws Exception if the YAML file cannot be read or written
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public void writeAllProperties(String yamlLocation) throws Exception {
        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
        Yaml yaml = new Yaml(options);
        File yamlFile = new File(yamlLocation);
        Map map;
        // try-with-resources: the original stream was never closed (fd leak)
        try (FileInputStream yamlStream = new FileInputStream(yamlFile)) {
            map = (Map) yaml.load(yamlStream);
        }
        Map<String, Object> entries = (Map) map.get(ROOT_NAME);
        // Rebuild the whole section from scratch on every call.
        entries.clear();
        entries.put("auto_eject_hosts", floridaConfig.getDynomiteAutoEjectHosts());
        entries.put("rack", envVariables.getRack());
        entries.put("distribution", floridaConfig.getDistribution());
        entries.put("dyn_listen", "0.0.0.0:" + commonConfig.getDynomitePeerPort());
        // Optionally restrict the stats endpoint to loopback only.
        if (commonConfig.isDynomiteStatsLocalHostOnly()) {
            entries.put("stats_listen", "127.0.0.1:" + commonConfig.getDynomiteStatsPort());
        } else {
            entries.put("stats_listen", "0.0.0.0:" + commonConfig.getDynomiteStatsPort());
        }
        entries.put("dyn_seed_provider", floridaConfig.getDynomiteSeedProvider());
        entries.put("gos_interval", floridaConfig.getDynomiteGossipInterval());
        entries.put("hash", floridaConfig.getDynomiteHashAlgorithm());
        entries.put("listen", "0.0.0.0:" + floridaConfig.getDynomiteClientPort());
        entries.put("preconnect", floridaConfig.getDynomiteStoragePreconnect());
        entries.put("server_retry_timeout", floridaConfig.getServerRetryTimeout());
        entries.put("timeout", floridaConfig.getTimeout());
        entries.put("tokens", ii.getTokens());
        entries.put("secure_server_option", floridaConfig.getDynomiteIntraClusterSecurity());
        entries.remove("redis");
        entries.put("datacenter", envVariables.getRegion());
        entries.put("read_consistency", floridaConfig.getDynomiteReadConsistency());
        entries.put("write_consistency", floridaConfig.getDynomiteWriteConsistency());
        entries.put("mbuf_size", floridaConfig.getDynomiteMBufSize());
        entries.put("max_msgs", setMaxMsgs());
        entries.put("pem_key_file", floridaConfig.getDynomiteInstallDir() + "/conf/dynomite.pem");

        String hashtag = floridaConfig.getDynomiteHashtag();
        if (!hashtag.isEmpty()) {
            // dynomite expects exactly an opening and a closing hashtag character
            if (hashtag.length() != 2) {
                logger.error("Hashtag must be of length 2. Provided hashtag: " + hashtag + " has length: "
                        + hashtag.length());
                logger.error("Not setting any hashtag");
                throw new RuntimeException("Hashtag is larger than 2 characters");
            }
            entries.put("hash_tag", hashtag);
        } else {
            logger.info("no hashtag FP defined");
        }

        List<String> seedp = (List) entries.get("dyn_seeds");
        if (seedp == null) {
            seedp = new ArrayList<String>();
            entries.put("dyn_seeds", seedp);
        } else {
            seedp.clear();
        }
        List<String> seeds = ii.getSeeds();
        if (!seeds.isEmpty()) {
            seedp.addAll(seeds);
        } else {
            // no peers known yet: drop the key entirely rather than leave it empty
            entries.remove("dyn_seeds");
        }

        List<String> servers = (List) entries.get("servers");
        if (servers == null) {
            servers = new ArrayList<String>();
            entries.put("servers", servers);
        } else {
            servers.clear();
        }
        entries.put("data_store", storageProxy.getEngineNumber());
        // Prefer a unix domain socket to the storage engine when one is configured.
        if (!storageProxy.getUnixPath().equals("")) {
            servers.add(storageProxy.getUnixPath() + ":1");
        } else {
            servers.add(storageProxy.getIpAddress() + ":" + storageProxy.getPort() + ":1");
        }

        if (floridaConfig.getConnectionPoolEnabled()) {
            entries.put("datastore_connections", floridaConfig.getDatastoreConnections());
            entries.put("local_peer_connections", floridaConfig.getLocalPeerConnections());
            entries.put("remote_peer_connections", floridaConfig.getRemotePeerConnections());
        }

        if (!this.instanceState.getYmlWritten()) {
            logger.info("YAML Dump: ");
            logger.info(yaml.dump(map));
            storageProxy.updateConfiguration();
        } else {
            logger.info("Updating dynomite.yml with latest information");
        }
        // try-with-resources: the original writer was never closed, so buffered
        // bytes could be lost and the file descriptor leaked
        try (FileWriter writer = new FileWriter(yamlLocation)) {
            yaml.dump(map, writer);
        }
        this.instanceState.setYmlWritten(true);
    }

    /**
     * Toggles the {@code auto_bootstrap} flag in the given YAML file, rewriting it
     * in place (e.g. bootstrap is disabled while restoring from a backup).
     *
     * @param yamlFile      path of the dynomite.yml file to update
     * @param autobootstrap new value of the auto_bootstrap flag
     * @throws IOException if the YAML file cannot be read or written
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public void updateAutoBootstrap(String yamlFile, boolean autobootstrap) throws IOException {
        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
        Yaml yaml = new Yaml(options);
        Map map;
        // try-with-resources: the original stream was never closed (fd leak)
        try (FileInputStream yamlStream = new FileInputStream(yamlFile)) {
            map = (Map) yaml.load(yamlStream);
        }
        // Dont bootstrap in restore mode
        map.put("auto_bootstrap", autobootstrap);
        logger.info("Updating yaml" + yaml.dump(map));
        try (FileWriter writer = new FileWriter(yamlFile)) {
            yaml.dump(map, writer);
        }
    }
}
| 3,100 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/monitoring/SimpleJedisFactory.java | package com.netflix.dynomitemanager.monitoring;
import redis.clients.jedis.Jedis;
public class SimpleJedisFactory implements JedisFactory {
public SimpleJedisFactory() {
}
@Override
public Jedis newInstance(String hostname, int port) {
return new Jedis(hostname, port);
}
} | 3,101 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/monitoring/RedisInfoMetricsTask.java | package com.netflix.dynomitemanager.monitoring;
import java.io.ByteArrayInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.storage.RedisInfoParser;
import com.netflix.dynomitemanager.storage.StorageProxy;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.LongGauge;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.NumericMonitor;
@Singleton
public class RedisInfoMetricsTask extends Task {
    private static final Logger Logger = LoggerFactory.getLogger(RedisInfoMetricsTask.class);

    public static final String TaskName = "Redis-Info-Task";

    /** Metric names published as monotonically increasing counters; everything else is a gauge. */
    private static final Set<String> COUNTER_LIST = new HashSet<String>();
    static {
        COUNTER_LIST.add("Redis_Stats_instantaneous_ops_per_sec");
    }

    // Cached servo monitors so they are registered with the registry only once.
    private final ConcurrentHashMap<String, LongGauge> redisInfoGaugeMetrics = new ConcurrentHashMap<String, LongGauge>();
    private final ConcurrentHashMap<String, NumericMonitor<Number>> redisInfoCounterMap = new ConcurrentHashMap<String, NumericMonitor<Number>>();

    private final JedisFactory jedisFactory;
    private final StorageProxy storageProxy;

    /**
     * Default constructor
     *
     * @param storageProxy proxy giving the Redis host/port to poll
     * @param jedisFactory factory used to create the Jedis connection
     */
    @Inject
    public RedisInfoMetricsTask(StorageProxy storageProxy, JedisFactory jedisFactory) {
        this.jedisFactory = jedisFactory;
        this.storageProxy = storageProxy;
    }

    /**
     * Connects to Redis, fetches the INFO payload, parses it and publishes the
     * values as servo metrics. Connection failures are logged, not propagated.
     */
    @Override
    public void execute() throws Exception {
        Jedis jedis = jedisFactory.newInstance(storageProxy.getIpAddress(), storageProxy.getPort());
        try {
            jedis.connect();
            String info = jedis.info();
            // Explicit charset: the original relied on the platform default,
            // which can mangle non-ASCII bytes on some JVM configurations.
            InputStreamReader reader = new InputStreamReader(
                    new ByteArrayInputStream(info.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
            RedisInfoParser infoParser = new RedisInfoParser();
            Map<String, Long> metrics = infoParser.parse(reader);
            processMetrics(metrics);
        } catch (Exception e) {
            Logger.error("Could not get jedis info metrics", e);
        } finally {
            // always tear down the connection, even on failure
            jedis.disconnect();
        }
    }

    /** Routes each parsed metric to either a counter or a gauge. */
    private void processMetrics(Map<String, Long> metrics) {
        // Iterate entries directly instead of keySet()+get() double lookups.
        for (Map.Entry<String, Long> metric : metrics.entrySet()) {
            if (COUNTER_LIST.contains(metric.getKey())) {
                processCounterMetric(metric.getKey(), metric.getValue());
            } else {
                processGaugeMetric(metric.getKey(), metric.getValue());
            }
        }
    }

    /**
     * Records {@code value} in the {@link LongGauge} for {@code key}, creating and
     * registering the gauge on first use (thread-safe via putIfAbsent).
     */
    private void processGaugeMetric(String key, Long value) {
        if (Logger.isDebugEnabled()) {
            Logger.debug("Process gauge: " + key + " " + value);
        }
        LongGauge oldGauge = redisInfoGaugeMetrics.get(key);
        if (oldGauge != null) {
            oldGauge.set(value);
            return;
        }
        // create a new long gauge
        LongGauge newGauge = new LongGauge(MonitorConfig.builder(key).build());
        oldGauge = redisInfoGaugeMetrics.putIfAbsent(key, newGauge);
        if (oldGauge == null) {
            newGauge.set(value);
            DefaultMonitorRegistry.getInstance().register(newGauge);
        } else {
            // someone else beat us to it. just use the oldGauge
            oldGauge.set(value);
        }
    }

    /**
     * Increments the {@link Counter} for {@code counterName} by the delta between
     * {@code val} and the last observed value, creating and registering the
     * counter on first use (thread-safe via putIfAbsent).
     */
    private void processCounterMetric(String counterName, Long val) {
        if (Logger.isDebugEnabled()) {
            Logger.debug("Process counter: " + counterName + " " + val);
        }
        NumericMonitor<Number> counter = redisInfoCounterMap.get(counterName);
        if (counter != null) {
            long increment = val - counter.getValue().longValue();
            ((Counter) counter).increment(increment);
            return;
        }
        counter = Monitors.newCounter(counterName);
        NumericMonitor<Number> oldCounter = redisInfoCounterMap.putIfAbsent(counterName, counter);
        if (oldCounter == null) {
            // this is the 1st time
            DefaultMonitorRegistry.getInstance().register(counter);
        } else {
            // someone beat us to it, take their obj instead
            counter = oldCounter;
        }
        long increment = val - counter.getValue().longValue();
        ((Counter) counter).increment(increment);
    }

    @Override
    public String getName() {
        return TaskName;
    }

    /** @return a timer that runs this task once every 30 seconds */
    public static TaskTimer getTimer() {
        // run once every 30 seconds
        return new SimpleTimer(TaskName, 30 * 1000);
    }
}
| 3,102 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/monitoring/JedisFactory.java | package com.netflix.dynomitemanager.monitoring;
import redis.clients.jedis.Jedis;
/**
 * Factory abstraction for creating {@link Jedis} client connections so that
 * callers (and tests) can substitute alternative implementations.
 */
public interface JedisFactory {

    /**
     * Builds a Jedis client for the given endpoint.
     *
     * @param hostname host of the Redis server
     * @param port     port of the Redis server
     * @return a new Jedis client
     */
    Jedis newInstance(String hostname, int port);
}
| 3,103 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/monitoring/ProcessMonitorTask.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.monitoring;
import com.google.common.base.Stopwatch;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.dynomitemanager.dynomite.IDynomiteProcess;
import com.netflix.dynomitemanager.storage.JedisUtils;
import com.netflix.dynomitemanager.storage.StorageProxy;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import org.quartz.StatefulJob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.concurrent.TimeUnit.MICROSECONDS;
/**
* This is the only class that initiates starting and stopping
* storage proxy, and storage processes. Hence, the updating of InstanceState
* only happens on one thread, however, it can be read by multiple threads.
* It is very important that it stays this way to keep process management
* simple and in one place.
*
* Monitors Dynomite and Storage process and handles different
* process failure scenarios.
*
 * This class cooperates with {@link com.netflix.dynomitemanager.storage.WarmBootstrapTask},
* {@link com.netflix.dynomitemanager.storage.StorageProxy}, {@link com.netflix.dynomitemanager.dynomite.IDynomiteProcess},
* and {@link com.netflix.dynomitemanager.config.InstanceState}.
* to handle the following cases.
*
* Case 1: Redis dies
* Case 2:
* a. Dynomite dies or
* b. Dynomite process has hung
* Case 3: Florida dies (with or without one of the other two processes being dead)
* Case 4: Florida and dynomite dies
* Case 5: Redis + Dynomite dies
* Case 6: Redis + Dynomite + Florida dies
*
* Currently the storage (Redis specifically) is launched by the dynomite launch script.
* TODO: The Redis could be directly launched from Dynomite.
*
* @author Monal Daxini
* @author Minh Do
* @author ipapapa
*/
@Singleton
public class ProcessMonitorTask extends Task implements StatefulJob {
    public static final String JOBNAME = "DYNOMITE_PROCESS_MONITOR_THREAD";
    private static final Logger logger = LoggerFactory.getLogger(ProcessMonitorTask.class);

    private final FloridaConfig config;
    // final: only ever assigned in the constructor (was mutable before)
    private final InstanceState instanceState;
    private final StorageProxy storageProxy;
    private final IDynomiteProcess dynomiteProcess;

    @Inject
    protected ProcessMonitorTask(FloridaConfig config, InstanceState instanceState,
            StorageProxy storageProxy, IDynomiteProcess dynomiteProcess) {
        this.config = config;
        this.instanceState = instanceState;
        this.storageProxy = storageProxy;
        this.dynomiteProcess = dynomiteProcess;
    }

    /**
     * Samples liveness of the dynomite process, the dynomite proxy port and the
     * storage engine, records the results in {@link InstanceState}, and restarts
     * dynomite when its process is not running. Does nothing while process
     * monitoring is suspended.
     */
    @Override
    public void execute() throws Exception {
        Stopwatch stopwatch = Stopwatch.createStarted();
        if (instanceState.getIsProcessMonitoringSuspended()) {
            return;
        }
        logger.info("Healthy {}", instanceState.isHealthy());
        instanceState.setStorageProxyProcessAlive(this.dynomiteProcess.dynomiteProcessCheck());
        instanceState.setStorageProxyAlive(
                JedisUtils.isAliveWithRetry(config.getDynomiteLocalAddress(), config.getDynomiteClientPort()));
        instanceState.setStorageAlive(storageProxy.isAlive());
        logger.info("ProcessMonitor state: {}, time elapsed to check (micros): {}", instanceState,
                stopwatch.elapsed(MICROSECONDS));
        if (!instanceState.isStorageProxyProcessAlive()) {
            if (!instanceState.isStorageAlive()) {
                logger.error("FATAL: Redis is down.");
                // TODO: Take appropriate action.
            } else {
                logger.info("Detected Dynomite process is not running. Restarting dynomite.");
            }
            // restarting dynomite also (re)starts storage via the launch script
            dynomiteProcess.start();
        }
        stopwatch.stop();
        if (logger.isDebugEnabled()) {
            logger.debug("Time to run the check (micros): {}", stopwatch.elapsed(MICROSECONDS));
        }
    }

    /** @return a timer that starts this task every 15 seconds */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME, 15L * 1000);
    }

    @Override
    public String getName() {
        return JOBNAME;
    }
}
| 3,104 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/monitoring/ServoMetricsTask.java | package com.netflix.dynomitemanager.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicStringProperty;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.*;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.methods.GetMethod;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
/**
* Simple class that encapsulates a {@link Task} implementation that fetches
* metrics from a remote source in the form of a json payload and then
* translates the payload into a set of Servo metrics - {@link Counter} and
* {@link Gauge}
*
* Few things to note.
*
* 1. The remote resource is specified via a url. There is a default path.
* Override this path using archaius (if needed) The fast property name is
* 'florida.metrics.url'
*
* 2. The json response can have nested maps of metrics. This class traverses
 * all the levels and flattens the metrics out using prefixes, e.g. {a {b {c} } }
* will get flattened out to a__b__c as a metric name.
*
* 3. Most metrics are {@link Counter}s, but some metrics can be {@link Gauge}s.
* For {@link Gauge}s one must configure the active whitelist of metric names
* via archaius. The fast property name is 'florida.metrics.gauge.whitelist'
*
* 4. Note that this class maintains a map of servo counters and gauges, so that
* they don't have to be recreated all the time. Hence this class maintains
* state and needs to be a singleton.
*
*
*
*/
@Singleton
public class ServoMetricsTask extends Task {

    private static final Logger Logger = LoggerFactory.getLogger(ServoMetricsTask.class);

    public static final String TaskName = "Servo-Metrics-Task";

    // Fast Property for configuring the remote resource to talk to
    private final DynamicStringProperty ServerMetricsUrl = DynamicPropertyFactory.getInstance()
            .getStringProperty("florida.metrics.url", "http://localhost:22222/info");

    // Fast Property for configuring a gauge whitelist (if needed)
    private final DynamicStringProperty GaugeWhitelist = DynamicPropertyFactory.getInstance()
            .getStringProperty("florida.metrics.gauge.whitelist", "");

    // The gauge whitelist that is being maintained. Note that we keep a
    // reference to it that can be updated dynamically if the fast property is
    // changed externally.
    private final AtomicReference<Set<String>> gaugeFilter = new AtomicReference<Set<String>>(new HashSet<String>());

    // The map of servo metrics
    private final ConcurrentHashMap<String, NumericMonitor<Number>> metricMap = new ConcurrentHashMap<String, NumericMonitor<Number>>();

    private final InstanceState state;

    @Inject
    public ServoMetricsTask(InstanceState state) {
        this.state = state;
        initGaugeWhitelist();
        // re-read the whitelist whenever the fast property changes
        GaugeWhitelist.addCallback(new Runnable() {
            @Override
            public void run() {
                initGaugeWhitelist();
            }
        });
    }

    /**
     * Returns a timer that enables this task to run once every 15 seconds
     *
     * @return TaskTimer
     */
    public static TaskTimer getTimer() {
        return new SimpleTimer(TaskName, 15 * 1000);
    }

    /**
     * The name of the task
     *
     * @return String
     */
    @Override
    public String getName() {
        return TaskName;
    }

    /**
     * @return metricMap
     */
    public ConcurrentHashMap<String, NumericMonitor<Number>> getMetricsMap() {
        return metricMap;
    }

    /**
     * Main execute() impl for this task. It makes a call to the remote service,
     * and if the response is a 200 with a json body, then this parses the json
     * response into servo metrics.
     *
     * Note that new metrics that weren't tracked before start being tracked,
     * and old metrics that are already there are simply updated with the new
     * values from the http response.
     */
    @Override
    public void execute() throws Exception {
        HttpClient client = null;
        GetMethod get = null;
        // update health state. I think we can merge the health check and info
        // check into one check later. However, health check also touches the
        // underneath storage, not just Dynomite.
        processGaugeMetric("dynomite__health", state.isHealthy() ? 1L : 0L);
        try {
            client = new HttpClient();
            client.getHttpConnectionManager().getParams().setConnectionTimeout(2000);
            get = new GetMethod(ServerMetricsUrl.get());
            int statusCode = client.executeMethod(get);
            if (statusCode != 200) {
                Logger.error("Got non 200 status code from " + ServerMetricsUrl.get());
                return;
            }
            String response = get.getResponseBodyAsString();
            if (Logger.isDebugEnabled()) {
                Logger.debug("Received response from " + ServerMetricsUrl.get() + "\n" + response);
            }
            if (!response.isEmpty()) {
                processJsonResponse(response);
            } else {
                Logger.error("Cannot parse empty response from " + ServerMetricsUrl.get());
            }
        } catch (Throwable t) {
            // Catch Throwable at this task boundary so one bad poll never kills
            // the scheduler thread; the logger already records the stack trace
            // (the old printStackTrace() calls were redundant).
            Logger.error("Failed to get metrics from Dynomite's REST endpoint: " + ServerMetricsUrl.get(), t);
        } finally {
            if (get != null) {
                get.releaseConnection();
            }
            if (client != null) {
                client.getHttpConnectionManager().closeIdleConnections(10);
            }
        }
    }

    /**
     * Parse the Json Payload and convert them to metrics.
     *
     * Example Json Payload
     *
     * {"service":"dynomite", "source":"florida-i-16ca1846", "version":"0.3.1",
     * "uptime":40439, "timestamp":1399064677, "datacenter":"DC1", "dyn_o_mite":
     * {"client_eof":0, "client_err":0, "client_connections":0,
     * "server_ejects":0, "forward_error":0, "fragments":0, "stats_count":0,
     * "gauge" : { } "127.0.0.1": {"server_eof":0, "server_err":0,
     * "server_timedout":0, "server_connections":0, "server_ejected_at":0,
     * "requests":0, "request_bytes":0, "responses":0, "response_bytes":0,
     * "in_queue":0, "in_queue_bytes":0, "out_queue":0, "out_queue_bytes":0 } }
     * }
     *
     * @param json raw json payload from the dynomite info endpoint
     * @throws Exception if the payload is not parseable json
     */
    public void processJsonResponse(String json) throws Exception {
        JSONParser parser = new JSONParser();
        JSONObject obj = (JSONObject) parser.parse(json);
        String service = (String) obj.get("service");
        // null-safe: the original NPE'd when the key was absent entirely
        if (service == null || service.isEmpty()) {
            Logger.error("Missing required key 'service' in json response: " + json);
            return;
        }
        // uptime
        Long uptime = (Long) obj.get("uptime");
        if (uptime == null) {
            Logger.error("Missing required key 'uptime' in json response: " + json);
            uptime = 0L;
        }
        processCounterMetric(service + "__uptime", uptime);
        // top-level latency/queue/payload fields; missing ones default to 0
        String[] fields = { "latency_max", "latency_999th", "latency_99th", "latency_95th", "latency_mean",
                "payload_size_max", "payload_size_999th", "payload_size_99th", "payload_size_95th", "payload_size_mean",
                "alloc_msgs", "free_msgs", "average_cross_region_rtt", "99_cross_region_rtt",
                "average_cross_zone_latency", "99_cross_zone_latency", "average_server_latency", "99_server_latency",
                "average_cross_region_queue_wait", "99_cross_region_queue_wait", "average_cross_zone_queue_wait",
                "99_cross_zone_queue_wait", "average_server_queue_wait", "99_server_queue_wait", "client_out_queue_99",
                "server_in_queue_99", "server_out_queue_99", "dnode_client_out_queue_99", "peer_in_queue_99",
                "peer_out_queue_99", "remote_peer_in_queue_99", "remote_peer_out_queue_99", "alloc_mbufs",
                "free_mbufs" };
        for (String field : fields) {
            Long val = (Long) obj.get(field);
            if (val == null) {
                val = 0L;
            }
            processGaugeMetric(service + "__" + field, val);
        }
        JSONObject stats = (JSONObject) obj.get("dyn_o_mite");
        if (stats == null) {
            Logger.error("Missing key 'dyn_o_mite' in json response: " + json);
            return;
        }
        parseObjectMetrics(service, stats);
    }

    /**
     * Helper to recursively flatten out a metric from a nested collection,
     * joining levels with "__".
     *
     * @param namePrefix prefix accumulated from the enclosing levels
     * @param obj        nested json object to flatten
     */
    private void parseObjectMetrics(String namePrefix, JSONObject obj) {
        for (Object key : obj.keySet()) {
            Object val = obj.get(key);
            if (val instanceof JSONObject) {
                parseObjectMetrics(namePrefix + "__" + key, (JSONObject) val);
            } else if (gaugeFilter.get().contains((String) key)) {
                processGaugeMetric(namePrefix + "__" + (String) key, (Long) val);
            } else {
                processCounterMetric(namePrefix + "__" + (String) key, (Long) val);
            }
        }
    }

    /**
     * Helper that tracks the metric value in a {@link Counter}. A new one is
     * created and registered (thread-safely) if it does not exist.
     *
     * @param counterName fully flattened metric name
     * @param val         latest absolute value reported by dynomite
     */
    private void processCounterMetric(String counterName, Long val) {
        if (Logger.isDebugEnabled()) {
            Logger.debug("Process counter: " + counterName + " " + val);
        }
        NumericMonitor<Number> counter = metricMap.get(counterName);
        if (counter != null) {
            long increment = val - counter.getValue().longValue();
            ((Counter) counter).increment(increment);
            return;
        }
        counter = Monitors.newCounter(counterName);
        NumericMonitor<Number> oldCounter = metricMap.putIfAbsent(counterName, counter);
        if (oldCounter == null) {
            // this is the 1st time
            DefaultMonitorRegistry.getInstance().register(counter);
        } else {
            // someone beat us to it, take their obj instead
            counter = oldCounter;
        }
        long increment = val - counter.getValue().longValue();
        ((Counter) counter).increment(increment);
    }

    /**
     * Helper that tracks the metric value in a {@link Gauge}. A new one is
     * created and registered (thread-safely) if it does not exist.
     *
     * @param gaugeName fully flattened metric name
     * @param val       latest value reported by dynomite
     */
    private void processGaugeMetric(String gaugeName, Long val) {
        if (Logger.isDebugEnabled()) {
            Logger.debug("Process gauge: " + gaugeName + " " + val);
        }
        NumericMonitor<Number> gauge = metricMap.get(gaugeName);
        if (gauge != null) {
            ((SimpleGauge) gauge).setValue(val);
            return;
        }
        gauge = new SimpleGauge(gaugeName, val);
        NumericMonitor<Number> oldGauge = metricMap.putIfAbsent(gaugeName, gauge);
        if (oldGauge == null) {
            ((SimpleGauge) gauge).setValue(val);
            DefaultMonitorRegistry.getInstance().register(gauge);
        } else {
            ((SimpleGauge) oldGauge).setValue(val);
        }
    }

    /**
     * Helper that tracks the gauge whitelist using defaults and what is set in
     * the fast property.
     */
    @SuppressWarnings("unchecked")
    private void initGaugeWhitelist() {
        Set<String> set = new HashSet<String>();
        set.add("server_connections");
        set.add("client_connections");
        set.add("dnode_client_connections");
        set.add("peer_connections");
        String s = GaugeWhitelist.get();
        if (!s.isEmpty()) {
            String[] parts = s.split(",");
            if (parts != null && parts.length > 0) {
                set.addAll(Arrays.asList(parts));
            }
        }
        gaugeFilter.set(set);
    }

    /**
     * Simple impl of the {@link Gauge}. Note that it maintains a threadsafe
     * reference to the actual value being monitored. Declared static since it
     * never touches the enclosing instance.
     */
    private static class SimpleGauge implements Gauge<Number> {

        private final MonitorConfig mConfig;
        private final AtomicReference<Number> value = new AtomicReference<Number>(null);

        private SimpleGauge(String name, Number number) {
            mConfig = MonitorConfig.builder(name).build();
            value.set(number);
        }

        @Override
        public Number getValue() {
            return value.get();
        }

        @Override
        public Number getValue(int pollerIndex) {
            return getValue();
        }

        @Override
        public MonitorConfig getConfig() {
            return mConfig;
        }

        public void setValue(Number n) {
            value.set(n);
        }
    }
}
| 3,105 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/backup/SnapshotTask.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.backup;
import java.io.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.joda.time.DateTime;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.dynomitemanager.storage.StorageProxy;
import com.netflix.nfsidecar.backup.Backup;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.scheduler.CronTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.nfsidecar.scheduler.CronTimer.DayOfWeek;
import com.netflix.nfsidecar.utils.ThreadSleeper;
/**
 * Task that takes a snapshot of the local storage (Redis) and uploads the
 * resulting persistence file (AOF or RDB, depending on configuration) to the
 * backup store (S3). The task is skipped entirely while a restore or a
 * bootstrap is in flight.
 */
@Singleton
public class SnapshotTask extends Task {
    public static final String TaskName = "SnapshotTask";
    private static final Logger logger = LoggerFactory.getLogger(SnapshotTask.class);

    private final ThreadSleeper sleeper = new ThreadSleeper();
    private final InstanceState state;
    private final StorageProxy storageProxy;
    private final Backup backup;
    private final FloridaConfig floridaConfig;
    // Number of times we poll storage for liveness (2s apart) before giving up.
    private final int storageRetries = 5;

    @Inject
    public SnapshotTask(FloridaConfig floridaConfig, InstanceState state, StorageProxy storageProxy, Backup backup) {
        this.floridaConfig = floridaConfig;
        this.state = state;
        this.storageProxy = storageProxy;
        this.backup = backup;
    }

    /**
     * Runs one backup attempt: waits for storage to be alive, asks the storage
     * proxy to snapshot, then uploads the persistence file.
     *
     * @throws Exception propagated from the underlying backup implementation
     */
    public void execute() throws Exception {
        this.state.setFirstBackup(false);
        if (state.isRestoring() || state.isBootstrapping()) {
            logger.error("S3 backup Failed: Restore is happening");
            return;
        }
        /*
         * Poll a few times until storage (Redis) is ready. We need storage to
         * be ready to dump the data, otherwise we may back up older data.
         * This also covers the case where the thread that starts Dynomite has
         * not started Redis yet.
         */
        int attempt = 0;
        for (attempt = 0; attempt < this.storageRetries; attempt++) {
            if (!this.state.isStorageAlive()) {
                // Sleep 2 seconds to give the Dynomite and storage processes time to come up.
                sleeper.sleepQuietly(2000);
                continue;
            }
            this.state.setBackingup(true);
            /*
             * Reset the backup status every time we start a backup so that we
             * capture the outcome of this run rather than a previous one.
             */
            this.state.setBackUpStatus(false);
            // The storage proxy takes a snapshot or compacts data.
            boolean snapshot = this.storageProxy.takeSnapshot();
            File file = persistenceFile();
            // Upload to S3 only when the snapshot succeeded and produced data.
            if (file.length() > 0 && snapshot) {
                DateTime now = DateTime.now();
                DateTime todayStart = now.withTimeAtStartOfDay();
                this.state.setBackupTime(todayStart);
                if (this.backup.upload(file, todayStart)) {
                    this.state.setBackUpStatus(true);
                    logger.info("S3 backup status: Completed!");
                } else {
                    logger.error("S3 backup status: Failed!");
                }
            } else {
                // Covers both a failed snapshot and an empty persistence file.
                logger.warn("S3 backup: snapshot failed or persistence file is empty - nothing to backup");
            }
            break;
        }
        if (attempt == this.storageRetries) {
            logger.error("S3 backup Failed: Redis was not up after {} retries", this.storageRetries);
        }
        this.state.setBackingup(false);
    }

    /**
     * @return the Redis persistence file to upload: appendonly.aof when AOF
     *         persistence is configured, nfredis.rdb otherwise
     */
    private File persistenceFile() {
        if (floridaConfig.persistenceType().equals("aof")) {
            return new File(floridaConfig.getPersistenceLocation() + "/appendonly.aof");
        }
        return new File(floridaConfig.getPersistenceLocation() + "/nfredis.rdb");
    }

    @Override
    public String getName() {
        return TaskName;
    }

    /**
     * Returns a timer that schedules this task: weekly (Monday) when the
     * backup schedule is "week", otherwise daily, always at the configured
     * backup hour.
     *
     * @param commonConfig source of the backup hour and schedule
     * @return TaskTimer driving this task
     */
    public static TaskTimer getTimer(CommonConfig commonConfig) {
        int hour = commonConfig.getBackupHour();
        if (commonConfig.getBackupSchedule().equals("week")) {
            return new CronTimer(DayOfWeek.MON, hour, 1, 0);
        }
        return new CronTimer(hour, 1, 0);
    }
}
| 3,106 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/backup/RestoreTask.java | package com.netflix.dynomitemanager.backup;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.joda.time.DateTime;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.dynomitemanager.dynomite.IDynomiteProcess;
import com.netflix.dynomitemanager.storage.StorageProcessManager;
import com.netflix.dynomitemanager.storage.StorageProxy;
import com.netflix.nfsidecar.backup.Restore;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.utils.Sleeper;
/**
 * Task for restoring snapshots from object storage.
 *
 * The restore sequence is deliberately ordered: stop Dynomite, stop storage,
 * pull the snapshot, restart storage (loading data), then restart Dynomite.
 */
@Singleton
public class RestoreTask extends Task {
public static final String TaskName = "RestoreTask";
private static final Logger logger = LoggerFactory.getLogger(RestoreTask.class);
private final InstanceState state;
private final StorageProxy storageProxy;
private final IDynomiteProcess dynProcess;
private final Sleeper sleeper;
private final Restore restore;
private StorageProcessManager storageProcessMgr;
private final CommonConfig config;
@Inject
public RestoreTask(CommonConfig config, InstanceState state, StorageProxy storageProxy,
IDynomiteProcess dynProcess, Sleeper sleeper, Restore restore, StorageProcessManager storageProcessMgr) {
this.config = config;
this.state = state;
this.storageProxy = storageProxy;
this.dynProcess = dynProcess;
this.sleeper = sleeper;
this.restore = restore;
this.storageProcessMgr = storageProcessMgr;
}
/**
 * Performs one restore cycle from object storage for the configured restore
 * date, recording progress and outcome in {@link InstanceState}.
 *
 * @throws Exception propagated from process stop/start or the restore itself
 */
public void execute() throws Exception {
this.state.setRestoring(true);
this.state.setFirstRestore(false);
/**
 * Set the status of the restore to "false" every time we start a
 * restore. This will ensure that prior to restore we recapture the
 * status of the restore.
 */
this.state.setRestoreStatus(false);
/* stop dynomite process */
this.dynProcess.stop();
// stop storage process
this.storageProcessMgr.stop();
/* restore from Object Storage */
if (restore.restoreData(config.getRestoreDate())) {
/* start storage process and load data */
logger.info("Restored successful: Starting storage process with loading data.");
this.storageProcessMgr.start();
// NOTE(review): on loadingData() failure we only log; the flow still
// starts Dynomite and marks the restore successful below — confirm
// this is intended.
if (!this.storageProxy.loadingData()) {
logger.error("Restore not successful: Restore failed because of Redis.");
}
logger.info("Restore Completed, sleeping 5 seconds before starting Dynomite!");
sleeper.sleepQuietly(5000);
this.dynProcess.start();
logger.info("Dynomite started");
this.state.setRestoreStatus(true);
} else {
/* start storage process without loading data */
// NOTE(review): despite the comment, no storage restart happens on
// this branch — only the error is logged.
logger.error("Restore not successful: Starting storage process without loading data.");
}
this.state.setRestoring(false);
this.state.setRestoreTime(DateTime.now());
}
@Override
public String getName() {
return TaskName;
}
}
| 3,107 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/dualAccount/AwsRoleAssumptionCredential.java | package com.netflix.dynomitemanager.dualAccount;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.google.inject.Inject;
import com.netflix.nfsidecar.aws.ICredential;
import com.netflix.nfsidecar.config.AWSCommonConfig;
import com.netflix.nfsidecar.identity.InstanceEnvIdentity;
/**
 * {@link ICredential} that lazily builds an AWS STS assume-role credentials
 * provider, used for dual-account (EC2-classic + VPC) clusters: a classic
 * instance assumes the VPC role and a VPC instance assumes the classic role.
 */
public class AwsRoleAssumptionCredential implements ICredential {
    private static final String AWS_ROLE_ASSUMPTION_SESSION_NAME = "AwsRoleAssumptionSession";

    private final ICredential cred;
    private final InstanceEnvIdentity insEnvIdentity;
    private final AWSCommonConfig config;

    /*
     * Lazily created on first use. Declared volatile so the double-checked
     * locking in getAwsCredentialProvider() publishes the fully-constructed
     * provider safely across threads.
     */
    private volatile AWSCredentialsProvider stsSessionCredentialsProvider;

    @Inject
    public AwsRoleAssumptionCredential(ICredential cred, AWSCommonConfig config, InstanceEnvIdentity insEnvIdentity) {
        this.cred = cred;
        this.config = config;
        this.insEnvIdentity = insEnvIdentity;
    }

    /**
     * @return a lazily-initialized STS assume-role credentials provider; STS
     *         issues temporary, short-lived sessions that refresh on
     *         session/token expiration
     */
    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        if (this.stsSessionCredentialsProvider == null) {
            synchronized (this) {
                // NOTE(review): the original guard is preserved — when
                // isDualAccount() is true the provider is (re)built even if
                // another thread already set it; confirm this is intentional.
                if (this.config.isDualAccount() || this.stsSessionCredentialsProvider == null) {
                    /*
                     * Pick the assumed IAM role based on the environment: an
                     * EC2-classic instance assumes the VPC role and a VPC
                     * instance assumes the EC2-classic role.
                     */
                    String roleArn;
                    if (this.insEnvIdentity.isClassic()) {
                        roleArn = this.config.getVpcAWSRoleAssumptionArn();
                    } else {
                        roleArn = this.config.getClassicAWSRoleAssumptionArn();
                    }

                    if (roleArn == null || roleArn.isEmpty()) {
                        throw new NullPointerException(
                                "Role ARN is null or empty probably due to missing config entry");
                    }

                    try {
                        this.stsSessionCredentialsProvider = new STSAssumeRoleSessionCredentialsProvider(
                                this.cred.getAwsCredentialProvider(), roleArn, AWS_ROLE_ASSUMPTION_SESSION_NAME);
                    } catch (Exception ex) {
                        throw new IllegalStateException(
                                "Exception in getting handle to AWS Security Token Service (STS). Msg: "
                                        + ex.getLocalizedMessage(),
                                ex);
                    }
                }
            }
        }
        return this.stsSessionCredentialsProvider;
    }
}
| 3,108 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/identity/InstanceIdentity.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.identity;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimaps;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.nfsidecar.config.AWSCommonConfig;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.instance.InstanceDataRetriever;
import com.netflix.nfsidecar.resources.env.IEnvVariables;
import com.netflix.nfsidecar.tokensdb.IAppsInstanceFactory;
import com.netflix.nfsidecar.utils.ITokenManager;
import com.netflix.nfsidecar.utils.RetryableCallable;
import com.netflix.nfsidecar.utils.Sleeper;
/**
 * This class provides the central place to create and consume the identity of
 * the instance - token, seeds etc.
 *
 * On construction it resolves this node's slot/token in the following order:
 * an already-registered entry (alive or decommissioned), a dead node's slot to
 * replace, a pre-generated token, and finally a brand new token.
 */
@Singleton
public class InstanceIdentity {
private static final Logger logger = LoggerFactory.getLogger(InstanceIdentity.class);
// Placeholder instance id marking a pre-generated token slot no live node has claimed yet.
private static final String DUMMY_INSTANCE_ID = "new_slot";
// Availability zone -> instances in that zone; rebuilt by populateRacMap().
private final ListMultimap<String, AppsInstance> locMap = Multimaps
.newListMultimap(new HashMap<String, Collection<AppsInstance>>(), new Supplier<List<AppsInstance>>() {
public List<AppsInstance> get() {
return Lists.newArrayList();
}
});
private final IAppsInstanceFactory factory;
private final IMembership membership;
private final CommonConfig commonConfig;
private final AWSCommonConfig aWSCommonConfig;
private final Sleeper sleeper;
private final ITokenManager tokenManager;
private final InstanceEnvIdentity insEnvIdentity;
private final InstanceDataRetriever retriever;
private final IEnvVariables envVariables;
// Matches live instances on a host other than this one, skipping pre-generated slots.
private final Predicate<AppsInstance> differentHostPredicate = new Predicate<AppsInstance>() {
@Override
public boolean apply(AppsInstance instance) {
return (!instance.getInstanceId().equalsIgnoreCase(DUMMY_INSTANCE_ID)
&& !instance.getHostName().equals(myInstance.getHostName()));
}
};
// Registry entry describing this node; resolved by init().
private AppsInstance myInstance;
// How this node obtained its token: replacing a dead node, claiming a
// pre-generated token, or creating a brand new one.
private boolean isReplace = false;
private boolean isTokenPregenerated = false;
private boolean isNewToken = false;
// Public IP of the dead node we replaced (empty when not a replacement).
private String replacedIp = "";
@Inject
public InstanceIdentity(IAppsInstanceFactory factory, IMembership membership, AWSCommonConfig aWSCommonConfig,
CommonConfig commonConfig, Sleeper sleeper, ITokenManager tokenManager, InstanceEnvIdentity insEnvIdentity,
InstanceDataRetriever retriever, IEnvVariables envVariables) throws Exception {
this.factory = factory;
this.membership = membership;
this.commonConfig = commonConfig;
this.aWSCommonConfig = aWSCommonConfig;
this.sleeper = sleeper;
this.tokenManager = tokenManager;
this.insEnvIdentity = insEnvIdentity;
this.retriever = retriever;
this.envVariables = envVariables;
init();
}
/** @return the registry entry describing this node. */
public AppsInstance getInstance() {
return myInstance;
}
/**
 * Resolves this node's identity. Tries, in order: an existing registration
 * (including the "-dead" cluster), a dead node's slot, a pre-generated
 * token, and finally a brand new token.
 *
 * @throws Exception if the token store cannot be reached after retries
 */
public void init() throws Exception {
// try to grab the token which was already assigned
myInstance = new RetryableCallable<AppsInstance>() {
@Override
public AppsInstance retriableCall() throws Exception {
// Check if this node is decommissioned
for (AppsInstance ins : factory.getAllIds(envVariables.getDynomiteClusterName() + "-dead")) {
logger.debug(String.format("[Dead] Iterating though the hosts: %s", ins.getInstanceId()));
if (ins.getInstanceId().equals(retriever.getInstanceId())) {
ins.setOutOfService(true);
return ins;
}
}
for (AppsInstance ins : factory.getAllIds(envVariables.getDynomiteClusterName())) {
logger.debug(String.format("[Alive] Iterating though the hosts: %s My id = [%s]",
ins.getInstanceId(), ins.getId()))
;
if (ins.getInstanceId().equals(retriever.getInstanceId()))
return ins;
}
return null;
}
}.call();
// Grab a dead token
if (null == myInstance)
myInstance = new GetDeadToken().call();
// Grab a pre-generated token if there is such one
if (null == myInstance)
myInstance = new GetPregeneratedToken().call();
// Grab a new token
if (null == myInstance) {
GetNewToken newToken = new GetNewToken();
newToken.set(100, 100);
myInstance = newToken.call();
}
logger.info("My token: " + myInstance.getToken());
}
// Rebuilds the zone -> instances map from the registry.
private void populateRacMap() {
locMap.clear();
for (AppsInstance ins : factory.getAllIds(envVariables.getDynomiteClusterName())) {
locMap.put(ins.getZone(), ins);
}
if (locMap.isEmpty()) {
logger.error("Could not find " + envVariables.getDynomiteClusterName() + " in the Cassandra cluster");
}
}
// Merges the local ASG membership with the cross-account ASG membership
// (used when the cluster spans an EC2-classic account and a VPC account).
private List<String> getDualAccountRacMembership(List<String> asgInstances) {
logger.info("Dual Account cluster");
List<String> crossAccountAsgInstances = membership.getCrossAccountRacMembership();
if (insEnvIdentity.isClassic()) {
logger.info("EC2 classic instances (local ASG): " + Arrays.toString(asgInstances.toArray()));
logger.info("VPC Account (cross-account ASG): " + Arrays.toString(crossAccountAsgInstances.toArray()));
} else {
logger.info("VPC Account (local ASG): " + Arrays.toString(asgInstances.toArray()));
logger.info("EC2 classic instances (cross-account ASG): "
+ Arrays.toString(crossAccountAsgInstances.toArray()));
}
// Remove duplicates (probably there are not)
asgInstances.removeAll(crossAccountAsgInstances);
// Merge the two lists
asgInstances.addAll(crossAccountAsgInstances);
logger.info("Combined Instances in the AZ: " + asgInstances);
return asgInstances;
}
/**
 * Claims the slot/token of a dead instance in this node's rack: a registry
 * entry whose instance id is no longer part of the ASG membership.
 */
public class GetDeadToken extends RetryableCallable<AppsInstance> {
@Override
public AppsInstance retriableCall() throws Exception {
final List<AppsInstance> allIds = factory.getAllIds(envVariables.getDynomiteClusterName());
List<String> asgInstances = membership.getRacMembership();
if (aWSCommonConfig.isDualAccount()) {
asgInstances = getDualAccountRacMembership(asgInstances);
} else {
logger.info("Single Account cluster");
}
// Sleep a random 10-15 sec to reduce the chance of two nodes racing for the same slot
sleeper.sleep(new Random().nextInt(5000) + 10000);
for (AppsInstance dead : allIds) {
// test same dc and is it is alive.
if (!dead.getRack().equals(envVariables.getRack()) || asgInstances.contains(dead.getInstanceId()))
continue;
logger.info("Found dead instances: " + dead.getInstanceId());
// AppsInstance markAsDead = factory.create(dead.getApp() +
// "-dead", dead.getId(), dead.getInstanceId(),
// dead.getHostName(), dead.getHostIP(), dead.getZone(),
// dead.getVolumes(), dead.getToken(), dead.getRack());
// remove it as we marked it down...
// factory.delete(dead);
isReplace = true;
replacedIp = dead.getHostIP();
String payLoad = dead.getToken();
logger.info("Trying to grab slot {} with availability zone {}", dead.getId(), dead.getZone());
return factory.create(envVariables.getDynomiteClusterName(), dead.getId(), retriever.getInstanceId(),
retriever.getPublicHostname(), commonConfig.getDynomitePort(), commonConfig.getDynomiteSecurePort(),
commonConfig.getDynomiteSecureStoragePort(), commonConfig.getDynomitePeerPort(), retriever.getPublicIP(),
retriever.getRac(), dead.getVolumes(), payLoad, envVariables.getRack());
}
return null;
}
// Refresh the zone map before each retry.
public void forEachExecution() {
populateRacMap();
}
}
/**
 * Claims a pre-generated token: a registry entry whose instance id is the
 * DUMMY_INSTANCE_ID placeholder, created ahead of time for this rack.
 */
public class GetPregeneratedToken extends RetryableCallable<AppsInstance> {
@Override
public AppsInstance retriableCall() throws Exception {
logger.info("Looking for any pre-generated token");
final List<AppsInstance> allIds = factory.getAllIds(envVariables.getDynomiteClusterName());
List<String> asgInstances = membership.getRacMembership();
// Sleep a random 10-15 sec to reduce the chance of two nodes racing for the same token
sleeper.sleep(new Random().nextInt(5000) + 10000);
for (AppsInstance dead : allIds) {
// test same zone and is it is alive.
if (!dead.getRack().equals(envVariables.getRack()) || asgInstances.contains(dead.getInstanceId())
|| !isInstanceDummy(dead))
continue;
logger.info("Found pre-generated token: " + dead.getToken());
// AppsInstance markAsDead = factory.create(dead.getApp() +
// "-dead", dead.getId(), dead.getInstanceId(),
// dead.getHostName(), dead.getHostIP(), dead.getRack(),
// dead.getVolumes(),
// dead.getToken());
// remove it as we marked it down...
factory.delete(dead);
isTokenPregenerated = true;
String payLoad = dead.getToken();
logger.info("Trying to grab slot {} with availability zone {}", dead.getId(), dead.getRack());
return factory.create(envVariables.getDynomiteClusterName(), dead.getId(), retriever.getInstanceId(),
retriever.getPublicHostname(), commonConfig.getDynomitePort(), commonConfig.getDynomiteSecurePort(),
commonConfig.getDynomiteSecureStoragePort(), commonConfig.getDynomitePeerPort(), retriever.getPublicIP(), retriever.getRac(), dead.getVolumes(),
payLoad, envVariables.getRack());
}
return null;
}
// Refresh the zone map before each retry.
public void forEachExecution() {
populateRacMap();
}
}
/**
 * Creates a brand new token for this node based on its slot (position) in
 * the ASG membership, offset by the rack's region offset.
 */
public class GetNewToken extends RetryableCallable<AppsInstance> {
public AppsInstance retriableCall() throws Exception {
// Sleep random interval - upto 15 sec
sleeper.sleep(new Random().nextInt(15000));
int hash = tokenManager.regionOffset(envVariables.getRack());
// use this hash so that the nodes are spred far away from the other
// regions.
String myInstanceId = retriever.getInstanceId();
List<String> asgInstanceIds = membership.getRacMembership();
logger.info("My Instance Id: " + myInstanceId);
for (String instanceId : asgInstanceIds) {
logger.info("InstanceId in ASG: " + instanceId);
}
int my_slot = asgInstanceIds.indexOf(myInstanceId);
logger.info("my_slot ::: " + my_slot);
isNewToken = true;
// Dual-account clusters size the rack by both the local and cross-account ASGs.
int rackMembershipSize;
if (aWSCommonConfig.isDualAccount()) {
rackMembershipSize = membership.getRacMembershipSize() + membership.getCrossAccountRacMembershipSize();
} else {
rackMembershipSize = membership.getRacMembershipSize();
}
logger.info(String.format(
"Trying to createToken with slot %d with rac count %d with rac membership size %d with dc %s",
my_slot, commonConfig.getRacks().size(), rackMembershipSize, envVariables.getRegion()));
// String payload = tokenManager.createToken(my_slot,
// membership.getRacCount(), membership.getRacMembershipSize(),
// config.getDataCenter());
String payload = tokenManager.createToken(my_slot, rackMembershipSize, envVariables.getRack());
return factory.create(envVariables.getDynomiteClusterName(), my_slot + hash, retriever.getInstanceId(),
retriever.getPublicHostname(), commonConfig.getDynomitePort(), commonConfig.getDynomiteSecurePort(),
commonConfig.getDynomiteSecureStoragePort(), commonConfig.getDynomitePeerPort(), retriever.getPublicIP(), retriever.getRac(), null,
payload, envVariables.getRack());
}
// Refresh the zone map before each retry.
public void forEachExecution() {
populateRacMap();
}
}
/*
 * public List<String> getSeeds1() throws UnknownHostException {
 * populateRacMap(); List<String> seeds = new LinkedList<String>(); //
 * Handle single zone deployment if (config.getRacs().size() == 1) { //
 * Return empty list if all nodes are not up if
 * (membership.getRacMembershipSize() !=
 * locMap.get(myInstance.getRac()).size()) return seeds; // If seed node,
 * return the next node in the list //if
 * (locMap.get(myInstance.getRac()).size() > 1 &&
 * locMap.get(myInstance.getRac()).get(0).getHostIP().equals(myInstance.
 * getHostIP())) //{
 * //seeds.add(locMap.get(myInstance.getRac()).get(1).getHostName());
 * //seedp.add(seed + ":" + config.getPeerListenerPort() + ":" +
 * config.getDataCenter() + ":5622637");
 * seeds.add(locMap.get(myInstance.getRac()).get(1).getHostName() + ":" +
 * config.getPeerListenerPort() + ":" + config.getDataCenter() + ":" +
 * locMap.get(myInstance.getRac()).get(1).getToken()); //} } for (String loc
 * : locMap.keySet()) { AppsInstance instance =
 * Iterables.tryFind(locMap.get(loc), differentHostPredicate).orNull(); if
 * (instance != null) { //seeds.add(instance.getHostName());
 * seeds.add(instance.getHostName() + ":" + config.getPeerListenerPort() +
 * ":" + config.getDataCenter() + ":" + instance.getToken()); } } return
 * seeds; }
 */
/**
 * Builds the seed list: every registered node except this one, formatted as
 * host:peerPort:rack:datacenter:token.
 *
 * @return seed strings for all peer nodes
 * @throws UnknownHostException declared but not thrown in this implementation
 */
public List<String> getSeeds() throws UnknownHostException {
// populateRacMap();
List<String> seeds = new LinkedList<String>();
for (AppsInstance ins : factory.getAllIds(envVariables.getDynomiteClusterName())) {
if (!ins.getInstanceId().equals(myInstance.getInstanceId())) {
logger.debug("Adding node: " + ins.getInstanceId());
seeds.add(ins.getHostName() + ":" + ins.getPeerPort() + ":" + ins.getRack() + ":"
+ ins.getDatacenter() + ":" + ins.getToken());
}
}
return seeds;
}
/**
 * @return every registered instance of this cluster, including this node
 * @throws UnknownHostException declared but not thrown in this implementation
 */
public List<AppsInstance> getClusterInfo() throws UnknownHostException {
List<AppsInstance> nodes = new LinkedList<AppsInstance>();
for (AppsInstance ins : factory.getAllIds(envVariables.getDynomiteClusterName())) {
nodes.add(ins);
}
return nodes;
}
/**
 * @return true when this node is the first registered instance in its zone
 */
public boolean isSeed() {
populateRacMap();
logger.info("Zone " + myInstance.getZone());
String ip = locMap.get(myInstance.getZone()).get(0).getHostName();
return myInstance.getHostName().equals(ip);
}
/** @return true if this node took over a dead node's token. */
public boolean isReplace() {
return isReplace;
}
/** @return true if this node claimed a pre-generated token. */
public boolean isTokenPregenerated() {
return isTokenPregenerated;
}
/** @return true if this node created a brand new token. */
public boolean isNewToken() {
return isNewToken;
}
/** @return the public IP of the replaced node, or "" when not a replacement. */
public String getReplacedIp() {
return replacedIp;
}
/** @return this node's token. */
public String getTokens() {
return myInstance.getToken();
}
// True when the entry is a pre-generated (unclaimed) token slot.
private boolean isInstanceDummy(AppsInstance instance) {
return instance.getInstanceId().equals(DUMMY_INSTANCE_ID);
}
}
| 3,109 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/identity/IMembership.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.identity;
import java.util.Collection;
import java.util.List;
/**
 * Interface to manage membership meta information such as size of RAC, list of
 * nodes in RAC etc. Also performs ACL updates used in multi-regional clusters.
 */
public interface IMembership
{
/**
 * Get a list of instances in the current RAC.
 *
 * @return instance ids of the members of the local RAC
 */
public List<String> getRacMembership();
/**
 * @return a list of instances if there is a cross-account RAC
 */
public List<String> getCrossAccountRacMembership();
/**
 * @return size of the current RAC
 */
public int getRacMembershipSize();
/**
 * @return size of the cross-account RAC
 */
public int getCrossAccountRacMembershipSize();
/**
 * Add security group ACLs.
 *
 * @param listIPs the IPs/ranges to allow (format per implementation — confirm)
 * @param from starting port of the range
 * @param to ending port of the range
 */
public void addACL(Collection<String> listIPs, int from, int to);
/**
 * Remove security group ACLs.
 *
 * @param listIPs the IPs/ranges to revoke (format per implementation — confirm)
 * @param from starting port of the range
 * @param to ending port of the range
 */
public void removeACL(Collection<String> listIPs, int from, int to);
/**
 * List all ACLs for the given port range.
 *
 * @param from starting port of the range
 * @param to ending port of the range
 * @return the configured ACL entries
 */
public List<String> listACL(int from, int to);
/**
 * Expand the membership size.
 *
 * @param count number of additional slots (the original comment said
 *        "by 1" — confirm against implementations)
 */
public void expandRacMembership(int count);
}
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/identity/AppsInstance.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.identity;
import java.io.Serializable;
import java.util.Map;
public class AppsInstance implements Serializable
{
private static final long serialVersionUID = 5606412386974488659L;
private String hostname;
private int dynomitePort;
private int dynomiteSecurePort;
private int dynomiteSecureStoragePort;
private int peerPort;
private long updatetime;
private boolean outOfService;
private String app;
private int Id;
private String instanceId;
private String availabilityZone;
private String rack;
private String publicip;
private String location;
private String token;
//Handles Storage objects
private Map<String, Object> volumes;
public String getApp()
{
return app;
}
public void setApp(String app)
{
this.app = app;
}
public int getId()
{
return Id;
}
public void setId(int id)
{
Id = id;
}
public String getInstanceId()
{
return instanceId;
}
public void setInstanceId(String instanceId)
{
this.instanceId = instanceId;
}
public String getZone()
{
return availabilityZone;
}
public void setZone(String availabilityZone)
{
this.availabilityZone = availabilityZone;
}
public String getHostName()
{
return hostname;
}
public String getHostIP()
{
return publicip;
}
public void setHost(String hostname, String publicip)
{
this.hostname = hostname;
this.publicip = publicip;
}
public void setHost(String hostname)
{
this.hostname = hostname;
}
public void setHostIP(String publicip)
{
this.publicip = publicip;
}
public String getToken()
{
return token;
}
public void setToken(String token)
{
this.token = token;
}
public Map<String, Object> getVolumes()
{
return volumes;
}
public void setVolumes(Map<String, Object> volumes)
{
this.volumes = volumes;
}
@Override
public String toString()
{
return String.format("Hostname: %s, InstanceId: %s, APP_NAME: %s, RAC : %s Location %s, Id: %s: Token: %s", getHostName(), getInstanceId(), getApp(), getZone(), getDatacenter(), getId(),
getToken());
}
public String getDatacenter()
{
return location;
}
public void setDatacenter(String dc)
{
this.location = dc;
}
public long getUpdatetime()
{
return updatetime;
}
public void setUpdatetime(long updatetime)
{
this.updatetime = updatetime;
}
public boolean isOutOfService()
{
return outOfService;
}
public void setOutOfService(boolean outOfService)
{
this.outOfService = outOfService;
}
public String getRack()
{
return rack;
}
public void setRack(String rack)
{
this.rack = rack;
}
public void setDynomitePort(int port) { this.dynomitePort = port; }
public int getDynomitePort() { return this.dynomitePort; }
public void setDynomiteSecurePort(int port) { this.dynomiteSecurePort = port; }
public int getDynomiteSecurePort() { return this.dynomiteSecurePort; }
public void setDynomiteSecureStoragePort(int port) { this.dynomiteSecureStoragePort = port; }
public int getDynomiteSecureStoragePort() { return this.dynomiteSecureStoragePort; }
public void setPeerPort(int port) { this.peerPort = port; }
public int getPeerPort() { return this.peerPort; }
}
| 3,111 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/identity/IInstanceState.java | package com.netflix.nfsidecar.identity;
/**
 * Read/write view of the local process state consulted by health checks and
 * lifecycle tasks.
 */
public interface IInstanceState {
/** @return whether the sidecar process is alive. */
public boolean isSideCarProcessAlive();
/** @return whether this node is currently bootstrapping. */
public boolean isBootstrapping();
/** @return whether the yml configuration file has been written out. */
public boolean getYmlWritten();
/** Records whether the yml configuration file has been written out. */
public void setYmlWritten(boolean b);
/** Records whether the storage proxy process is alive. */
public void setStorageProxyAlive(boolean isStorageProxyAlive);
}
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/identity/InstanceEnvIdentity.java | package com.netflix.nfsidecar.identity;
/**
 * A means to determine the AWS environment (EC2 classic vs. VPC) for the
 * running instance.
 */
public interface InstanceEnvIdentity {
/**
 * @return true if the running instance is in "classic", false otherwise.
 */
public Boolean isClassic();
/**
 * @return true if the running instance is in VPC, under your default AWS
 *         account, false otherwise.
 */
public Boolean isDefaultVpc();
/**
 * @return true if the running instance is in VPC, under a specific AWS
 *         account, false otherwise.
 */
public Boolean isNonDefaultVpc();
/** The possible environments (note: enum name carries a historical typo). */
public static enum InstanceEnvironent {
CLASSIC, DEFAULT_VPC, NONDEFAULT_VPC
};
}
| 3,113 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/config/CassCommonConfig.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.config;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
/**
 * Configuration for the Cassandra ("bootstrap") cluster that backs the
 * token/instance registry used by the sidecar. Values are resolved by
 * Archaius under the {@code dbsidecar.cass} prefix.
 */
@Configuration(prefix = "dbsidecar.cass")
public interface CassCommonConfig {
/**
 * @return Bootstrap cluster name (depends on another cass cluster)
 */
@DefaultValue("cass_turtle")
@PropertyName(name = "dyno.sidecore.clusterName")
public String getCassandraClusterName();
/**
 * @return true if Eureka is used to find the bootstrap cluster
 */
@DefaultValue("false")
@PropertyName(name = "dyno.sidecore.eureka.enabled")
public boolean isEurekaHostsSupplierEnabled();
/**
 * @return the port on which the bootstrap cluster can be contacted
 */
@DefaultValue("7102")
@PropertyName(name = "dyno.sidecore.port")
public int getCassandraThriftPort();
/**
 * @return the seed node(s) used to contact the bootstrap Cassandra cluster
 */
@DefaultValue("127.0.0.1")
@PropertyName(name = "dyno.sidecore.seeds")
public String getCassandraSeeds();
/**
 * Get the name of the keyspace that stores tokens for the Dynomite cluster.
 *
 * @return the keyspace name
 */
@DefaultValue("dyno_bootstrap")
@PropertyName(name = "metadata.keyspace")
public String getCassandraKeyspaceName();
/**
 * @return the refresh interval in msecs for getting the tokens.
 *         A value of 0 means: do not cache the tokens — every query to
 *         Dynomite-manager for tokens is forwarded to the token store.
 */
@DefaultValue("0")
@PropertyName(name = "dyno.sidecore.tokenRefreshInterval")
public long getTokenRefreshInterval();
}
| 3,114 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/config/AWSCommonConfig.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.config;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
@Configuration(prefix = "dbsidecar.aws")
public interface AWSCommonConfig {

    // Dual Account

    /**
     * @return the Amazon Resource Name (ARN) for EC2 classic.
     */
    @DefaultValue("null")
    @PropertyName(name = "ec2.roleassumption.arn")
    public String getClassicAWSRoleAssumptionArn();

    /**
     * @return the Amazon Resource Name (ARN) for VPC.
     */
    @DefaultValue("null")
    @PropertyName(name = "vpc.roleassumption.arn")
    public String getVpcAWSRoleAssumptionArn();

    /**
     * @return true when dual-account (classic + VPC) role assumption is enabled
     */
    @DefaultValue("false")
    @PropertyName(name = "roleassumption.dualaccount")
    public boolean isDualAccount();

    // Backup and Restore

    /**
     * @return the S3 bucket name that stores backups
     */
    // TODO: for a common default value we probably have to resort to a defined FP
    @DefaultValue("us-east-1.dynomite-backup-test")
    @PropertyName(name = "dyno.backup.bucket.name")
    public String getBucketName();

    /**
     * @return the base directory (key prefix) for backups inside the bucket
     */
    // TODO: for a common default value we probably have to resort to a defined FP
    @DefaultValue("backup")
    @PropertyName(name = "dyno.backup.s3.base_dir")
    public String getBackupLocation();
}
| 3,115 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/config/CommonConfig.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.config;
import java.util.List;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
@Configuration(prefix = "dbsidecar.common")
public interface CommonConfig {

    /**
     * @return Get the Region name
     */
    @DefaultValue("")
    @PropertyName(name = "region")
    public String getRegion();

    /**
     * @return the rack (e.g. availability zone / ASG) this node belongs to
     */
    @DefaultValue("")
    @PropertyName(name = "rack")
    public String getRack();

    /**
     * @return the list of racks (zones) available to the cluster
     */
    @PropertyName(name = "zones.available")
    public List<String> getRacks();

    /**
     * Get the security group associated with nodes in this cluster
     */
    @PropertyName(name = "acl.groupname")
    public String getACLGroupName();

    /*****************************************************************/

    /**
     * Get the peer-to-peer port used by Dynomite to communicate with other
     * Dynomite nodes.
     *
     * @return the peer-to-peer port used for intra-cluster communication
     */
    @DefaultValue("8101")
    @PropertyName(name = "dyno.peer.port")
    public int getDynomitePeerPort();

    /**
     * @return the port Dynomite listens on for client traffic
     */
    @DefaultValue("8102")
    @PropertyName(name = "dyno.port")
    public int getDynomitePort();

    /**
     * @return the port serving Dynomite stats/admin requests
     */
    @DefaultValue("22222")
    @PropertyName(name = "dyno.stats.port")
    public int getDynomiteStatsPort();

    /**
     * @return true when the stats port should be reachable from localhost only
     */
    @DefaultValue("true")
    @PropertyName(name = "dyno.stats.localhost.only")
    public boolean isDynomiteStatsLocalHostOnly();

    // Default value of -1 means it is not having a secure port
    @DefaultValue("-1")
    @PropertyName(name = "dyno.secure.port")
    public int getDynomiteSecurePort();

    // Default value of -1 means it is not having a secure port
    @DefaultValue("-1")
    @PropertyName(name = "dyno.secure.storage.port")
    public int getDynomiteSecureStoragePort();

    /**
     * @return true when periodic snapshot backups are enabled
     */
    @DefaultValue("false")
    @PropertyName(name = "dyno.backup.snapshot.enabled")
    public boolean isBackupEnabled();

    /**
     * @return true when restore-from-backup is enabled
     */
    @DefaultValue("false")
    @PropertyName(name = "dyno.backup.restore.enabled")
    public boolean isRestoreEnabled();

    /**
     * @return the backup schedule granularity (default "day")
     */
    @DefaultValue("day")
    @PropertyName(name = "dyno.backup.schedule")
    public String getBackupSchedule();

    /**
     * @return the hour at which the backup runs (presumably 0-23 — confirm
     *         against the backup scheduler)
     */
    @DefaultValue("12")
    @PropertyName(name = "dyno.backup.hour")
    public int getBackupHour();

    /**
     * @return the date of the backup to restore from (presumably yyyyMMdd,
     *         judging by the default — confirm against the restore task)
     */
    @DefaultValue("20101010")
    @PropertyName(name = "dyno.backup.restore.date")
    public String getRestoreDate();
}
| 3,116 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/resources/SecurityGroupAdmin.java | package com.netflix.nfsidecar.resources;
import java.util.Collections;
import javax.ws.rs.DELETE;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.netflix.nfsidecar.identity.IMembership;
/**
* This http endpoint allows direct updates (adding/removing) (CIDR) IP addresses and port
* ranges to the security group for this app.
*/
@Path("/v1/secgroup")
@Produces(MediaType.TEXT_PLAIN)
public class SecurityGroupAdmin
{
private static final Logger log = LoggerFactory.getLogger(SecurityGroupAdmin.class);
private static final String CIDR_TAG = "/32";
private final IMembership membership;
@Inject
public SecurityGroupAdmin(IMembership membership)
{
this.membership = membership;
}
@POST
public Response addACL(@QueryParam("ip") String ipAddr, @QueryParam("fromPort") int fromPort, @QueryParam("toPort") int toPort)
{
if(!ipAddr.endsWith(CIDR_TAG))
ipAddr += CIDR_TAG;
try
{
membership.addACL(Collections.singletonList(ipAddr), fromPort, toPort);
}
catch(Exception e)
{
log.error("Error while trying to add an ACL to a security group", e);
return Response.serverError().build();
}
return Response.ok().build();
}
@DELETE
public Response removeACL(@QueryParam("ip") String ipAddr, @QueryParam("fromPort") int fromPort, @QueryParam("toPort") int toPort)
{
if(!ipAddr.endsWith(CIDR_TAG))
ipAddr += CIDR_TAG;
try
{
membership.removeACL(Collections.singletonList(ipAddr), fromPort, toPort);
}
catch(Exception e)
{
log.error("Error while trying to remove an ACL to a security group", e);
return Response.serverError().build();
}
return Response.ok().build();
}
}
| 3,117 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/resources | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/resources/env/IEnvVariables.java | package com.netflix.nfsidecar.resources.env;
/**
 * Accessors for instance-level environment settings (cluster, region, rack).
 * Implementations typically read process environment variables and may fall
 * back to configuration.
 */
public interface IEnvVariables {

    /** @return the Dynomite cluster name for this node. */
    public String getDynomiteClusterName();

    /** @return the region (e.g. AWS region) this node runs in. */
    public String getRegion();

    /** @return the rack (e.g. ASG / availability zone) this node belongs to. */
    public String getRack();
}
| 3,118 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/resources | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/resources/env/InstanceEnvVariables.java | package com.netflix.nfsidecar.resources.env;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.netflix.nfsidecar.config.CommonConfig;
/**
 * {@link IEnvVariables} implementation that reads Netflix environment
 * variables and falls back to {@link CommonConfig} where a fallback exists.
 */
public class InstanceEnvVariables implements IEnvVariables {

    private static final Logger logger = LoggerFactory.getLogger(InstanceEnvVariables.class);

    private final CommonConfig config;

    @Inject
    public InstanceEnvVariables(CommonConfig config) {
        this.config = config;
    }

    /**
     * @return the cluster name from NETFLIX_APP; may be null/blank when the
     *         variable is not set (there is no config fallback — the previous
     *         fallback was removed because CommonConfig has no such property).
     */
    @Override
    public String getDynomiteClusterName() {
        return System.getenv("NETFLIX_APP");
    }

    /** @return the region from EC2_REGION, or the configured region if unset. */
    @Override
    public String getRegion() {
        String region = System.getenv("EC2_REGION");
        if (StringUtils.isBlank(region)) {
            // Parameterized logging avoids eager string concatenation.
            logger.warn("Region environment variable not defined. Falling back to {}", config.getRegion());
            region = config.getRegion();
        }
        return region;
    }

    /** @return the rack from NETFLIX_AUTO_SCALE_GROUP, or the configured rack if unset. */
    @Override
    public String getRack() {
        String rack = System.getenv("NETFLIX_AUTO_SCALE_GROUP");
        if (StringUtils.isBlank(rack)) {
            logger.error("Rack environment variable not defined. Falling back to {}", config.getRack());
            rack = config.getRack();
        }
        return rack;
    }
}
| 3,119 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/configSource/AbstractConfigSource.java | package com.netflix.nfsidecar.configSource;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.commons.lang.StringUtils;
import java.util.List;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * Base implementations for most methods on {@link IConfigSource}.
 *
 * All typed getters funnel through {@link #get(String)}; on a parse failure
 * the supplied default is returned (parse errors are deliberately swallowed).
 */
public abstract class AbstractConfigSource implements IConfigSource
{
    private String asgName;
    private String region;

    /**
     * Captures the ASG name and region. Subclasses that override this must
     * call {@code super.intialize(...)}.
     */
    @Override
    public void intialize(final String asgName, final String region)
    {
        this.asgName = checkNotNull(asgName, "ASG name is not defined");
        this.region = checkNotNull(region, "Region is not defined");
    }

    /** A key is "contained" when it resolves to a non-null value. */
    @Override
    public boolean contains(final String key)
    {
        return get(key) != null;
    }

    @Override
    public boolean isEmpty()
    {
        return size() == 0;
    }

    @Override
    public String get(final String key, final String defaultValue)
    {
        final String value = get(key);
        return (value != null) ? value : defaultValue;
    }

    /**
     * Boolean lookup. {@code Boolean.parseBoolean} never throws (any value
     * other than "true", case-insensitive, yields false), so the previous
     * try/catch around it was dead code and has been removed.
     */
    @Override
    public boolean get(final String key, final boolean defaultValue)
    {
        final String value = get(key);
        return (value != null) ? Boolean.parseBoolean(value) : defaultValue;
    }

    /** Resolves the value as a fully-qualified class name. */
    @Override
    public Class<?> get(final String key, final Class<?> defaultValue)
    {
        final String value = get(key);
        if (value != null)
        {
            try
            {
                return Class.forName(value);
            }
            catch (ClassNotFoundException e)
            {
                // ignore and return default
            }
        }
        return defaultValue;
    }

    /** Resolves the value as an enum constant of the default's enum type. */
    @Override
    public <T extends Enum<T>> T get(final String key, final T defaultValue)
    {
        final String value = get(key);
        if (value != null)
        {
            try
            {
                return Enum.valueOf(defaultValue.getDeclaringClass(), value);
            }
            catch (Exception e)
            {
                // ignore and return default.
            }
        }
        return defaultValue;
    }

    @Override
    public int get(final String key, final int defaultValue)
    {
        final String value = get(key);
        if (value != null)
        {
            try
            {
                return Integer.parseInt(value);
            }
            catch (Exception e)
            {
                // ignore and return default
            }
        }
        return defaultValue;
    }

    @Override
    public long get(final String key, final long defaultValue)
    {
        final String value = get(key);
        if (value != null)
        {
            try
            {
                return Long.parseLong(value);
            }
            catch (Exception e)
            {
                // return default.
            }
        }
        return defaultValue;
    }

    @Override
    public float get(final String key, final float defaultValue)
    {
        final String value = get(key);
        if (value != null)
        {
            try
            {
                return Float.parseFloat(value);
            }
            catch (Exception e)
            {
                // ignore and return default;
            }
        }
        return defaultValue;
    }

    @Override
    public double get(final String key, final double defaultValue)
    {
        final String value = get(key);
        if (value != null)
        {
            try
            {
                return Double.parseDouble(value);
            }
            catch (Exception e)
            {
                // ignore and return default.
            }
        }
        return defaultValue;
    }

    @Override
    public List<String> getList(String prop)
    {
        return getList(prop, ImmutableList.<String>of());
    }

    /** Splits the raw value on commas and strips whitespace from each element. */
    @Override
    public List<String> getList(String prop, List<String> defaultValue)
    {
        final String value = get(prop);
        if (value != null)
        {
            return getTrimmedStringList(value.split(","));
        }
        return defaultValue;
    }

    protected String getAsgName()
    {
        return asgName;
    }

    protected String getRegion()
    {
        return region;
    }

    private List<String> getTrimmedStringList(String[] strings)
    {
        List<String> list = Lists.newArrayList();
        for (String s : strings)
        {
            list.add(StringUtils.strip(s));
        }
        return list;
    }
}
| 3,120 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/configSource/MemoryConfigSource.java | package com.netflix.nfsidecar.configSource;
import com.google.common.collect.Maps;
import java.util.Map;
/**
 * In-memory {@link IConfigSource} backed by a concurrent map; useful for
 * tests and programmatic overrides.
 */
public final class MemoryConfigSource extends AbstractConfigSource
{
    private final Map<String, String> data = Maps.newConcurrentMap();

    @Override
    public int size()
    {
        return data.size();
    }

    @Override
    public String get(final String key)
    {
        return data.get(key);
    }

    @Override
    public void set(final String key, final String value)
    {
        // Consistency fix: the sibling sources (PropertiesConfigSource,
        // SystemPropertiesConfigSource) reject null values with a message;
        // previously this class let the concurrent map throw a bare NPE.
        java.util.Objects.requireNonNull(value, "Value can not be null for configurations.");
        data.put(key, value);
    }
}
| 3,121 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/configSource/IConfigSource.java | package com.netflix.nfsidecar.configSource;
import java.util.List;
import com.google.inject.ImplementedBy;
/**
 * Defines the configurations for an application.
 */
@ImplementedBy(DefaultConfigSource.class)
public interface IConfigSource
{
    /**
     * Must be called before any other method. This method will allow implementations to do any setup that they require
     * before being called.
     *
     * NOTE(review): the method name is misspelled ("intialize"); renaming
     * would break all implementations, so it is documented here instead.
     *
     * @param asgName auto-scaling group name of this instance.
     * @param region region this instance runs in.
     */
    void intialize(String asgName, String region);

    /**
     * A non-negative integer indicating a count of elements.
     *
     * @return non-negative integer indicating a count of elements.
     */
    int size();

    /**
     * Returns {@code true} if the size is zero. May be more efficient than calculating size.
     *
     * @return {@code true} if the size is zero otherwise {@code false}.
     */
    boolean isEmpty();

    /**
     * Check if the given key can be found in the config.
     *
     * @param key to look up value.
     * @return if the key is present
     */
    boolean contains(String key);

    /**
     * Get a String associated with the given configuration key.
     *
     * @param key to look up value.
     * @return value from config or null if not present.
     */
    String get(String key);

    /**
     * Get a String associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    String get(String key, String defaultValue);

    /**
     * Get a boolean associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    boolean get(String key, boolean defaultValue);

    /**
     * Get a Class associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    Class<?> get(String key, Class<?> defaultValue);

    /**
     * Get a Enum associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @param <T> enum type.
     * @return value from config or defaultValue if not present.
     */
    <T extends Enum<T>> T get(String key, T defaultValue);

    /**
     * Get a int associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    int get(String key, int defaultValue);

    /**
     * Get a long associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    long get(String key, long defaultValue);

    /**
     * Get a float associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    float get(String key, float defaultValue);

    /**
     * Get a double associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    double get(String key, double defaultValue);

    /**
     * Get a list of strings associated with the given configuration key.
     *
     * @param key to look up value.
     * @return value from config or an immutable list if not present.
     */
    List<String> getList(String key);

    /**
     * Get a list of strings associated with the given configuration key.
     *
     * @param key to look up value.
     * @param defaultValue if value is not present.
     * @return value from config or defaultValue if not present.
     */
    List<String> getList(String key, List<String> defaultValue);

    /**
     * Set the value for the given key.
     *
     * @param key to set value for.
     * @param value to set.
     */
    void set(String key, String value);
}
| 3,122 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/configSource/PropertiesConfigSource.java | package com.netflix.nfsidecar.configSource;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Map;
import java.util.Properties;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * Loads the 'florida.properties' file as a source.
 */
public class PropertiesConfigSource extends AbstractConfigSource
{
    private static final Logger logger = LoggerFactory.getLogger(PropertiesConfigSource.class.getName());

    private static final String DEFAULT_FLORIDA_PROPERTIES = "florida.properties";

    private final Map<String, String> data = Maps.newConcurrentMap();
    private final String appsFile;

    public PropertiesConfigSource()
    {
        this.appsFile = DEFAULT_FLORIDA_PROPERTIES;
    }

    public PropertiesConfigSource(final Properties properties)
    {
        checkNotNull(properties);
        this.appsFile = DEFAULT_FLORIDA_PROPERTIES;
        clone(properties);
    }

    @VisibleForTesting
    PropertiesConfigSource(final String file)
    {
        this.appsFile = checkNotNull(file);
    }

    /**
     * Loads the properties file from the classpath (if present) into this
     * source. A missing file is not an error; a present-but-unreadable file
     * is logged as a warning.
     */
    @Override
    public void intialize(final String asgName, final String region)
    {
        super.intialize(asgName, region);
        Properties properties = new Properties();
        URL url = PropertiesConfigSource.class.getClassLoader().getResource(appsFile);
        if (url != null)
        {
            // try-with-resources: the stream from url.openStream() was
            // previously never closed (resource leak).
            try (InputStream in = url.openStream())
            {
                properties.load(in);
                clone(properties);
            }
            catch (IOException e)
            {
                // The file exists but could not be read; previously this was
                // mislogged as "file absent" and the exception was dropped.
                logger.warn("Failed to load " + appsFile + ". Ignoring.", e);
            }
        }
        else
        {
            logger.info("No " + appsFile + " found on the classpath. Ignore!");
        }
    }

    @Override
    public String get(final String prop)
    {
        return data.get(prop);
    }

    @Override
    public void set(final String key, final String value)
    {
        Preconditions.checkNotNull(value, "Value can not be null for configurations.");
        data.put(key, value);
    }

    @Override
    public int size()
    {
        return data.size();
    }

    @Override
    public boolean contains(final String prop)
    {
        return data.containsKey(prop);
    }

    /**
     * Clones all the values from the properties. If the value is null, it will be ignored.
     *
     * @param properties to clone
     */
    private void clone(final Properties properties)
    {
        if (properties.isEmpty()) return;

        synchronized (properties)
        {
            for (final String key : properties.stringPropertyNames())
            {
                final String value = properties.getProperty(key);
                if (!Strings.isNullOrEmpty(value))
                {
                    data.put(key, value);
                }
            }
        }
    }
}
| 3,123 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/configSource/CompositeConfigSource.java | package com.netflix.nfsidecar.configSource;
import java.util.Collection;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import com.google.inject.Singleton;
/**
 * A {@link IConfigSource} that fans out to an ordered list of delegate
 * sources. Lookups consult the delegates in order and return the first
 * non-null value; {@link #set(String, String)} always writes to the first
 * delegate. Get-with-default variants come from {@link AbstractConfigSource};
 * a delegate's own overrides of those are not consulted.
 */
@Singleton
public class CompositeConfigSource extends AbstractConfigSource
{
    private final ImmutableCollection<? extends IConfigSource> sources;

    @Inject
    public CompositeConfigSource(final ImmutableCollection<? extends IConfigSource> sources)
    {
        Preconditions.checkArgument(!sources.isEmpty(), "Can not create a composite config source without config sources!");
        this.sources = sources;
    }

    public CompositeConfigSource(final Collection<? extends IConfigSource> sources)
    {
        this(ImmutableList.copyOf(sources));
    }

    public CompositeConfigSource(final Iterable<? extends IConfigSource> sources)
    {
        this(ImmutableList.copyOf(sources));
    }

    public CompositeConfigSource(final IConfigSource... sources)
    {
        this(ImmutableList.copyOf(sources));
    }

    /** Initializes every delegate with the same ASG name and region. */
    @Override
    public void intialize(final String asgName, final String region)
    {
        // TODO should this catch any potential exceptions?
        sources.forEach(delegate -> delegate.intialize(asgName, region));
    }

    /** Sum of the delegates' sizes (duplicated keys are counted per source). */
    @Override
    public int size()
    {
        return sources.stream().mapToInt(IConfigSource::size).sum();
    }

    @Override
    public boolean isEmpty()
    {
        return size() == 0;
    }

    @Override
    public boolean contains(final String key)
    {
        return get(key) != null;
    }

    /** First non-null value across the delegates, in order; null if absent. */
    @Override
    public String get(final String key)
    {
        Preconditions.checkNotNull(key);
        return sources.stream()
                .map(delegate -> delegate.get(key))
                .filter(value -> value != null)
                .findFirst()
                .orElse(null);
    }

    /** Writes the pair into the first delegate source. */
    @Override
    public void set(final String key, final String value)
    {
        Preconditions.checkNotNull(value, "Value can not be null for configurations.");
        final IConfigSource primary = Iterables.getFirst(sources, null);
        // primary shouldn't be null: the collection is immutable and non-empty.
        Preconditions.checkState(primary != null, "There was no IConfigSource found at the first location?");
        primary.set(key, value);
    }
}
| 3,124 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/configSource/DefaultConfigSource.java | package com.netflix.nfsidecar.configSource;
import javax.inject.Inject;
/**
 * Default {@link IConfigSource}: a composite of properties-file and system
 * properties sources, consulted in constructor-argument order.
 *
 * NOTE(review): the first two parameters are both typed PropertiesConfigSource
 * (the first is named "simpleDBConfigSource"); this looks like a leftover from
 * an earlier SimpleDB-backed source — confirm the intended wiring.
 */
public class DefaultConfigSource extends CompositeConfigSource {

    @Inject
    public DefaultConfigSource(final PropertiesConfigSource simpleDBConfigSource,
            final PropertiesConfigSource propertiesConfigSource,
            final SystemPropertiesConfigSource systemPropertiesConfigSource) {
        super(simpleDBConfigSource, propertiesConfigSource, systemPropertiesConfigSource);
    }
}
| 3,125 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/configSource/SystemPropertiesConfigSource.java | package com.netflix.nfsidecar.configSource;
import java.util.Map;
import java.util.Properties;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
/**
 * Exposes a snapshot of {@link System#getProperties()} as a config source.
 *
 * Implementation note: {@link #set(String, String)} writes only to this
 * instance's private map — system properties themselves are never modified,
 * and other instances of this class are unaffected.
 */
public final class SystemPropertiesConfigSource extends AbstractConfigSource
{
    private static final String BLANK = "";

    private final Map<String, String> data = Maps.newConcurrentMap();

    /** Copies every non-blank system property into the local map. */
    @Override
    public void intialize(final String asgName, final String region)
    {
        super.intialize(asgName, region);

        final Properties sysProps = System.getProperties();
        sysProps.stringPropertyNames().forEach(name -> {
            final String propValue = sysProps.getProperty(name);
            if (propValue != null && !BLANK.equals(propValue))
            {
                data.put(name, propValue);
            }
        });
    }

    @Override
    public int size()
    {
        return data.size();
    }

    @Override
    public String get(final String key)
    {
        return data.get(key);
    }

    @Override
    public void set(final String key, final String value)
    {
        Preconditions.checkNotNull(value, "Value can not be null for configurations.");
        data.put(key, value);
    }
}
| 3,126 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/guice/GuiceContext.java | package com.netflix.nfsidecar.guice;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Injector;
/**
* A holder class around the Governator Guice {@code Injector}
*/
public class GuiceContext {
private static final Logger logger = LoggerFactory.getLogger(GuiceContext.class);
private static final GuiceContext INSTANCE = new GuiceContext();
private Injector injector;
private GuiceContext(){}
/*
* IMPORTANT: must be invoked when the web app starts (@see PriamLifecycleListener.initialize())
*/
public static void setInjector(Injector val) {
if (INSTANCE.injector == null) {
synchronized(GuiceContext.class) {
if (INSTANCE.injector == null) {
INSTANCE.injector = val;
}
}
}
}
public static Injector getInjector()
{
if (INSTANCE.injector == null) {
throw new IllegalStateException("The injector is null. It should have been set when the web app starts (in some listener such as PriamLifecycleListener.initialize()");
}
logger.info("The injector provided has id: " + INSTANCE.injector.hashCode());
return INSTANCE.injector;
}
} | 3,127 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/TaskTimer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import java.text.ParseException;
import org.quartz.Trigger;
/**
 * Interface to represent time/interval
 */
public interface TaskTimer
{
    /**
     * @return the Quartz trigger encoding this timer's schedule
     * @throws ParseException if the underlying schedule expression is invalid
     */
    public Trigger getTrigger() throws ParseException;
}
| 3,128 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/CronTimer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import java.text.ParseException;
import org.quartz.CronTrigger;
import org.quartz.Scheduler;
import org.quartz.Trigger;
/**
 * Runs jobs at the specified absolute time and frequency, expressed as a
 * Quartz cron expression.
 */
public class CronTimer implements TaskTimer
{
    /** Quartz cron expression assembled by one of the constructors. */
    private final String cronExpression;

    public enum DayOfWeek
    {
        SUN, MON, TUE, WED, THU, FRI, SAT
    }

    /** Hourly schedule: fires at the given minute/second of every hour. */
    public CronTimer(int minute, int sec)
    {
        this(String.format("%d %d * * * ?", sec, minute));
    }

    /** Daily schedule: fires once a day at the given time. */
    public CronTimer(int hour, int minute, int sec)
    {
        this(String.format("%d %d %d * * ?", sec, minute, hour));
    }

    /** Weekly schedule: fires once a week on the given day and time. */
    public CronTimer(DayOfWeek dayofweek, int hour, int minute, int sec)
    {
        this(String.format("%d %d %d * * %s", sec, minute, hour, dayofweek));
    }

    /** Schedule built from a raw Quartz cron expression. */
    public CronTimer(String expression)
    {
        this.cronExpression = expression;
    }

    /** Builds the Quartz trigger for the configured cron expression. */
    public Trigger getTrigger() throws ParseException
    {
        return new CronTrigger("CronTrigger", Scheduler.DEFAULT_GROUP, cronExpression);
    }
}
| 3,129 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/GuiceJobFactory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import org.quartz.Job;
import org.quartz.JobDetail;
import org.quartz.SchedulerException;
import org.quartz.spi.JobFactory;
import org.quartz.spi.TriggerFiredBundle;
import com.google.inject.Inject;
import com.google.inject.Injector;
/**
 * Quartz {@link JobFactory} that creates job instances through Guice so the
 * jobs' dependencies are injected.
 */
public class GuiceJobFactory implements JobFactory
{
    public final Injector guice;

    @Inject
    public GuiceJobFactory(Injector guice)
    {
        this.guice = guice;
    }

    /** Instantiates the job class declared by the fired trigger's JobDetail. */
    @Override
    public Job newJob(TriggerFiredBundle bundle) throws SchedulerException
    {
        final Class<?> jobType = bundle.getJobDetail().getJobClass();
        final Job created = (Job) guice.getInstance(jobType);
        // NOTE(review): getInstance() already performs injection on the new
        // instance; this extra injectMembers() call looks redundant — confirm
        // before removing.
        guice.injectMembers(created);
        return created;
    }
}
| 3,130 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/TaskScheduler.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import java.text.ParseException;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.nfsidecar.utils.Sleeper;
/**
* Scheduling class to schedule Florida tasks. Uses Quartz scheduler
*/
@Singleton
public class TaskScheduler
{
    private static final Logger logger = LoggerFactory.getLogger(TaskScheduler.class);
    private final Scheduler scheduler;
    private final GuiceJobFactory jobFactory;
    private final Sleeper sleeper;
    @Inject
    public TaskScheduler(SchedulerFactory factory, GuiceJobFactory jobFactory, Sleeper sleeper)
    {
        try
        {
            this.scheduler = factory.getScheduler();
            // All jobs are created through Guice so their dependencies get injected.
            this.scheduler.setJobFactory(jobFactory);
            this.jobFactory = jobFactory;
        }
        catch (SchedulerException e)
        {
            // Failing to obtain the scheduler is fatal for the sidecar.
            throw new RuntimeException(e);
        }
        this.sleeper = sleeper;
    }
    /**
     * Add a task to the scheduler
     *
     * @param name unique job name within Quartz's default group
     * @param taskclass task implementation instantiated per firing
     * @param timer supplies the Quartz trigger; must not be null
     */
    public void addTask(String name, Class<? extends Task> taskclass, TaskTimer timer) throws SchedulerException, ParseException
    {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        JobDetail job = new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass);
        scheduler.scheduleJob(job, timer.getTrigger());
    }
    /**
     * Add a delayed task to the scheduler.
     *
     * The delay is implemented with a throwaway thread that sleeps for
     * delayInSeconds before registering the job; scheduling failures after the
     * delay are only logged, never propagated to the caller.
     */
    public void addTaskWithDelay(final String name, Class<? extends Task> taskclass, final TaskTimer timer, final int delayInSeconds) throws SchedulerException, ParseException
    {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        final JobDetail job = new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass);
        new Thread(new Runnable(){
            public void run()
            {
                try
                {
                    sleeper.sleepQuietly(delayInSeconds * 1000L);
                    scheduler.scheduleJob(job, timer.getTrigger());
                }
                catch (SchedulerException e)
                {
                    logger.warn("problem occurred while scheduling a job with name " + name, e);
                }
                catch (ParseException e)
                {
                    logger.warn("problem occurred while parsing a job with name " + name, e);
                }
            }
        }).start();
    }
    /** Runs the task once, synchronously, bypassing Quartz (context is null). */
    public void runTaskNow(Class<? extends Task> taskclass) throws Exception
    {
        jobFactory.guice.getInstance(taskclass).execute(null);
    }
    /** Removes the named job from the default Quartz group. */
    public void deleteTask(String name) throws SchedulerException, ParseException
    {
        scheduler.deleteJob(name, Scheduler.DEFAULT_GROUP);
    }
    public final Scheduler getScheduler()
    {
        return scheduler;
    }
    /** Stops the underlying Quartz scheduler; wraps checked failures. */
    public void shutdown()
    {
        try
        {
            scheduler.shutdown();
        }
        catch (SchedulerException e)
        {
            throw new RuntimeException(e);
        }
    }
    /** Starts trigger processing; wraps checked failures. */
    public void start()
    {
        try
        {
            scheduler.start();
        }
        catch (SchedulerException ex)
        {
            throw new RuntimeException(ex);
        }
    }
}
| 3,131 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/BlockingSubmitThreadPoolExecutor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
/**
* {@link ThreadPoolExecutor} that will block in the {@code submit()} method
* until the task can be successfully added to the queue.
*/
public class BlockingSubmitThreadPoolExecutor extends ThreadPoolExecutor
{
    // Polling interval (ms) while waiting for queue capacity.
    private static final long DEFAULT_SLEEP = 100;
    // Idle thread keep-alive, in seconds (see the super() call below).
    private static final long DEFAULT_KEEP_ALIVE = 100;
    private static final Logger logger = LoggerFactory.getLogger(BlockingSubmitThreadPoolExecutor.class);
    private BlockingQueue<Runnable> queue;
    // Maximum total time (ms) to wait before giving up in submit()/sleepTillEmpty().
    private long giveupTime;
    // Count of tasks submitted but not yet finished (decremented in afterExecute).
    private AtomicInteger active;
    /**
     * @param maximumPoolSize core and max pool size (fixed-size pool)
     * @param workQueue the bounded queue whose remaining capacity gates submit()
     * @param timeoutAdding max milliseconds to wait for queue space before failing
     */
    public BlockingSubmitThreadPoolExecutor(int maximumPoolSize, BlockingQueue<Runnable> workQueue, long timeoutAdding)
    {
        super(maximumPoolSize, maximumPoolSize, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, workQueue);
        this.queue = workQueue;
        this.giveupTime = timeoutAdding;
        this.active = new AtomicInteger(0);
    }
    /**
     * This is a thread safe way to avoid rejection exception... this is
     * implemented because we might want to hold the incoming requests till
     * there is a free thread.
     *
     * Blocks (polling every DEFAULT_SLEEP ms) until the queue has capacity,
     * then delegates to the normal submit. Throws RuntimeException after
     * waiting more than giveupTime ms. Only one submitter can wait at a time
     * because the whole wait-and-submit is synchronized on this executor.
     */
    @Override
    public <T> Future<T> submit(Callable<T> task)
    {
        synchronized (this)
        {
            active.incrementAndGet();
            long timeout = 0;
            while (queue.remainingCapacity() == 0)
            {
                try
                {
                    if (timeout <= giveupTime)
                    {
                        Thread.sleep(DEFAULT_SLEEP);
                        timeout += DEFAULT_SLEEP;
                    }
                    else
                    {
                        throw new RuntimeException("Timed out because TPE is too busy...");
                    }
                }
                catch (InterruptedException e)
                {
                    throw new RuntimeException(e);
                }
            }
            return super.submit(task);
        }
    }
    @Override
    protected void afterExecute(Runnable r, Throwable t)
    {
        super.afterExecute(r, t);
        active.decrementAndGet();
    }
    /**
     * blocking call to test if the threads are done or not.
     *
     * Polls until both the queue is empty and no submitted task remains
     * unfinished; throws RuntimeException once giveupTime ms have elapsed.
     */
    public void sleepTillEmpty()
    {
        long timeout = 0;
        while (!queue.isEmpty() || (active.get() > 0))
        {
            try
            {
                if (timeout <= giveupTime)
                {
                    Thread.sleep(DEFAULT_SLEEP);
                    timeout += DEFAULT_SLEEP;
                    logger.debug("After Sleeping for empty: {}, Count: {}", +queue.size(), active.get());
                }
                else
                {
                    throw new RuntimeException("Timed out because TPE is too busy...");
                }
            }
            catch (InterruptedException e)
            {
                throw new RuntimeException(e);
            }
        }
    }
}
| 3,132 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/ExecutionException.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
/**
 * Checked exception raised when a scheduled task fails during initialization
 * or execution. Thin wrapper over {@link Exception}; each constructor simply
 * forwards to the matching superclass constructor.
 */
public class ExecutionException extends Exception
{
    private static final long serialVersionUID = 1L;

    /** Failure with a message only. */
    public ExecutionException(String msg)
    {
        super(msg);
    }

    /** Failure with a message and an underlying cause. */
    public ExecutionException(String msg, Throwable th)
    {
        super(msg, th);
    }

    /** Wraps another exception as the cause. */
    public ExecutionException(Exception ex)
    {
        super(ex);
    }

    /** Wraps an arbitrary throwable as the cause. */
    public ExecutionException(Throwable th)
    {
        super(th);
    }
}
| 3,133 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/NamedThreadPoolExecutor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
 * Fixed-size {@link ThreadPoolExecutor} whose daemon worker threads are named
 * "poolName-N", and whose rejection handler blocks (retrying every second)
 * until the task can be queued instead of throwing immediately.
 */
public class NamedThreadPoolExecutor extends ThreadPoolExecutor
{
    public NamedThreadPoolExecutor(int poolSize, String poolName)
    {
        this(poolSize, poolName, new LinkedBlockingQueue<Runnable>());
    }
    public NamedThreadPoolExecutor(int poolSize, String poolName, BlockingQueue<Runnable> queue)
    {
        super(poolSize, poolSize, 1000, TimeUnit.MILLISECONDS, queue,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat(poolName + "-%d").build(),
                new LocalRejectedExecutionHandler(queue));
    }
    /**
     * Rejection handler that re-offers the task to the queue in 1s increments
     * until it fits, only giving up once the executor is shut down.
     */
    private static class LocalRejectedExecutionHandler implements RejectedExecutionHandler
    {
        private final BlockingQueue<Runnable> queue;
        LocalRejectedExecutionHandler(BlockingQueue<Runnable> queue)
        {
            this.queue = queue;
        }
        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor)
        {
            while (true)
            {
                if (executor.isShutdown())
                    throw new RejectedExecutionException("ThreadPoolExecutor has shut down");
                try
                {
                    if (queue.offer(task, 1000, TimeUnit.MILLISECONDS))
                        break;
                }
                catch (InterruptedException e)
                {
                    //NOP
                    // NOTE(review): the interrupt status is swallowed here and the loop
                    // keeps retrying. Restoring it (Thread.currentThread().interrupt())
                    // would make subsequent timed offers fail fast — verify callers
                    // before changing this behavior.
                }
            }
        }
    }
}
| 3,134 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/Task.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import com.google.common.base.Throwables;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Task class that should be implemented by all cron tasks. Jobconf will contain
* any instance specific data
*
* NOTE: Constructor must not throw any exception. This will cause Quartz to set the job to failure
*/
/**
 * Task class that should be implemented by all cron tasks. Jobconf will contain
 * any instance specific data
 *
 * NOTE: Constructor must not throw any exception. This will cause Quartz to set the job to failure
 */
public abstract class Task implements Job, TaskMBean
{
    // Last observed state; written from execute(JobExecutionContext) without
    // synchronization, so concurrent readers may see a stale value.
    public STATE status = STATE.DONE;
    public static enum STATE
    {
        ERROR, RUNNING, DONE
    }
    private static final Logger logger = LoggerFactory.getLogger(Task.class);
    // Lifetime counters exposed through the TaskMBean interface.
    private final AtomicInteger errors = new AtomicInteger();
    private final AtomicInteger executions = new AtomicInteger();
    protected Task()
    {
        this(ManagementFactory.getPlatformMBeanServer());
    }
    protected Task(MBeanServer mBeanServer) {
        // TODO: don't do mbean registration here
        String mbeanName = "com.netflix.florida.scheduler:type=" + this.getClass().getName();
        try
        {
            mBeanServer.registerMBean(this, new ObjectName(mbeanName));
            initialize();
        }
        catch (Exception e)
        {
            // NOTE(review): rethrowing contradicts the class comment that the
            // constructor must not throw; Quartz will then mark the job failed.
            throw Throwables.propagate(e);
        }
    }
    /**
     * This method has to be implemented and cannot thow any exception.
     */
    public void initialize() throws ExecutionException
    {
        // nothing to initialize
    }
    /** Subclass hook containing the actual work for one firing. */
    public abstract void execute() throws Exception;
    /**
     * Main method to execute a task
     *
     * Increments the execution counter, skips the run if a previous firing is
     * still marked RUNNING, and records ERROR/DONE plus the error counter
     * depending on the outcome. Never propagates failures to Quartz.
     */
    public void execute(JobExecutionContext context) throws JobExecutionException
    {
        executions.incrementAndGet();
        try
        {
            // Overlap guard: note the check and the write below are not atomic.
            if (status == STATE.RUNNING)
                return;
            status = STATE.RUNNING;
            execute();
        }
        catch (Exception e)
        {
            status = STATE.ERROR;
            logger.error("Couldn't execute the task because of: " + e.getMessage(), e);
            errors.incrementAndGet();
        }
        catch (Throwable e)
        {
            // Only Errors reach this clause; Exceptions were caught above.
            status = STATE.ERROR;
            logger.error("Couldnt execute the task because of: " + e.getMessage(), e);
            errors.incrementAndGet();
        }
        if (status != STATE.ERROR)
            status = STATE.DONE;
    }
    public STATE state()
    {
        return status;
    }
    public int getErrorCount()
    {
        return errors.get();
    }
    public int getExecutionCount()
    {
        return executions.get();
    }
    /** Human-readable task name, used for scheduling and monitoring. */
    public abstract String getName();
}
| 3,135 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/TaskMBean.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
/**
* MBean to monitor Task executions.
*
*/
public interface TaskMBean
{
    /** @return total number of failed executions since this task was registered */
    public int getErrorCount();
    /** @return total number of execution attempts (including skipped overlaps) */
    public int getExecutionCount();
    /** @return human-readable task name */
    public String getName();
}
| 3,136 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/scheduler/SimpleTimer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.scheduler;
import java.text.ParseException;
import java.util.Date;
import org.quartz.Scheduler;
import org.quartz.SimpleTrigger;
import org.quartz.Trigger;
/**
* SimpleTimer allows jobs to run starting from specified time occurring at
* regular frequency's. Frequency of the execution timestamp since epoch.
*/
public class SimpleTimer implements TaskTimer
{
    private SimpleTrigger trigger;
    /**
     * Repeats indefinitely at the given interval.
     *
     * @param name trigger name
     * @param interval repeat interval — per Quartz SimpleTrigger, in milliseconds
     */
    public SimpleTimer(String name, long interval)
    {
        this.trigger = new SimpleTrigger(name, SimpleTrigger.REPEAT_INDEFINITELY, interval);
    }
    /**
     * Run once at given time...
     *
     * @param startTime absolute fire time, milliseconds since the epoch
     */
    public SimpleTimer(String name, String group, long startTime)
    {
        this.trigger = new SimpleTrigger(name, group, new Date(startTime));
    }
    /**
     * Run immediately and dont do that again.
     */
    public SimpleTimer(String name)
    {
        this.trigger = new SimpleTrigger(name, Scheduler.DEFAULT_GROUP);
    }
    /** Returns the trigger, configured to fire immediately on a misfire. */
    public Trigger getTrigger() throws ParseException
    {
        trigger.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW);
        return trigger;
    }
}
| 3,137 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/SystemUtils.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FilterInputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
public class SystemUtils
{
    private static final Logger logger = LoggerFactory.getLogger(SystemUtils.class);
    /**
     * Performs an HTTP GET on the given URL (1s connect/read timeouts) and
     * returns the response body decoded as UTF-8.
     *
     * @param url the URL to fetch
     * @return the response body
     * @throws RuntimeException if the response code is not 200 or on any I/O failure
     */
    public static String getDataFromUrl(String url)
    {
        try
        {
            HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
            conn.setConnectTimeout(1000);
            conn.setReadTimeout(1000);
            conn.setRequestMethod("GET");
            try
            {
                if (conn.getResponseCode() != 200)
                {
                    throw new RuntimeException("Unable to get data for URL " + url);
                }
                ByteArrayOutputStream bos = new ByteArrayOutputStream();
                DataInputStream d = new DataInputStream((FilterInputStream) conn.getContent());
                try
                {
                    byte[] b = new byte[2048];
                    int c = 0;
                    while ((c = d.read(b, 0, b.length)) != -1)
                        bos.write(b, 0, c);
                }
                finally
                {
                    // Close the stream even when the read loop fails part-way.
                    d.close();
                }
                String return_ = new String(bos.toByteArray(), Charsets.UTF_8);
                logger.info("Calling URL API: {} returns: {}", url, return_);
                return return_;
            }
            finally
            {
                // Fix: previously disconnect() ran only on the success path, leaking
                // the connection on non-200 responses and read errors.
                conn.disconnect();
            }
        }
        catch (Exception ex)
        {
            throw new RuntimeException(ex);
        }
    }
    /**
     * delete all the files/dirs in the given Directory but dont delete the dir
     * itself.
     *
     * @param dirPath directory whose contents are removed
     * @param childdirs when non-empty, only these child directories are cleaned
     * @throws IOException on filesystem failure
     */
    public static void cleanupDir(String dirPath, List<String> childdirs) throws IOException
    {
        if (childdirs == null || childdirs.size() == 0)
            FileUtils.cleanDirectory(new File(dirPath));
        else
        {
            for (String cdir : childdirs)
                FileUtils.cleanDirectory(new File(dirPath + "/" + cdir));
        }
    }
}
| 3,138 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/NFException.java | package com.netflix.nfsidecar.utils;
/**
 * Immutable holder for the context of a failure: the column-family key, the
 * path being processed, and the captured stack-trace text.
 *
 * NOTE(review): despite the name, this is a plain data object — it does not
 * extend {@link Exception}.
 */
public class NFException {

    private final String cfKey;
    private final String pathName;
    private final String stacktrace;

    public NFException(String cfKey, String pathName, String stacktrace) {
        this.cfKey = cfKey;
        this.pathName = pathName;
        this.stacktrace = stacktrace;
    }

    /** @return the column-family key associated with the failure */
    public String getCfKey() {
        return cfKey;
    }

    /** @return the path that was being processed */
    public String getPathName() {
        return pathName;
    }

    /** @return the recorded stack-trace text */
    public String getStacktrace() {
        return stacktrace;
    }
}
| 3,139 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/ITokenManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import com.google.inject.ImplementedBy;
import java.math.BigInteger;
import java.util.List;
@ImplementedBy(TokenManager.class)
public interface ITokenManager
{
    /**
     * Creates a token for the given slot across racCount * racSize instances,
     * offset by a per-region constant.
     */
    String createToken(int mySlot, int racCount, int racSize, String region);
    /** Creates a token for the given slot across totalCount instances. */
    String createToken(int mySlot, int totalCount, String region);
    /** Returns the member of tokenList numerically closest to tokenToSearch. */
    BigInteger findClosestToken(BigInteger tokenToSearch, List<BigInteger> tokenList);
    /** Returns the token-space offset applied for the given region. */
    int regionOffset(String region);
}
| 3,140 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/FifoQueue.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import java.util.Comparator;
import java.util.TreeSet;
/**
 * A bounded sorted set: after each {@link #adjustAndAdd(Comparable)} the
 * smallest element is evicted whenever the configured capacity is exceeded.
 */
public class FifoQueue<E extends Comparable<E>> extends TreeSet<E>
{
    private static final long serialVersionUID = -7388604551920505669L;

    /** Maximum number of elements retained after adjustAndAdd(). */
    private int capacity;

    /** Bounded set ordered by the elements' natural ordering. */
    public FifoQueue(int capacity)
    {
        this(capacity, new Comparator<E>()
        {
            @Override
            public int compare(E left, E right)
            {
                return left.compareTo(right);
            }
        });
    }

    /** Bounded set ordered by the supplied comparator. */
    public FifoQueue(int capacity, Comparator<E> comparator)
    {
        super(comparator);
        this.capacity = capacity;
    }

    /** Adds the element, then evicts the smallest one if over capacity. */
    public synchronized void adjustAndAdd(E e)
    {
        add(e);
        if (size() > capacity)
            pollFirst();
    }
}
| 3,141 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/TokenManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import java.math.BigInteger;
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Ordering;
public class TokenManager implements ITokenManager
{
    public static final BigInteger MINIMUM_TOKEN = BigInteger.ZERO;
    //Currently using murmur so max value is 2^32 - 1
    public static final BigInteger MAXIMUM_TOKEN = new BigInteger("2").pow(32).add(new BigInteger("-1"));
    /**
     * Calculate a token for the given position, evenly spaced from other size-1 nodes. See
     *
     * @param size number of slots by which the token space will be divided
     * @param position slot number, multiplier
     * @param offset added to token
     * @return MAXIMUM_TOKEN / size * position + offset, if <= MAXIMUM_TOKEN, otherwise wrap around the MINIMUM_TOKEN
     */
    @VisibleForTesting BigInteger initialToken(int size, int position, int offset)
    {
        Preconditions.checkArgument(size > 0, "size must be > 0");
        Preconditions.checkArgument(offset >= 0, "offset must be >= 0");
        /*
         * TODO: Is this it valid to add "&& position < size" to the following precondition? This currently causes
         * unit test failures.
         */
        Preconditions.checkArgument(position >= 0, "position must be >= 0");
        // Evenly divide the ring, scale to this slot, shift by the region
        // offset, and wrap modulo the maximum token value.
        return MAXIMUM_TOKEN.divide(BigInteger.valueOf(size))
                .multiply(BigInteger.valueOf(position))
                .add(BigInteger.valueOf(offset)).mod(MAXIMUM_TOKEN);
    }
    /**
     * Creates a token given the following parameter
     *
     * @param my_slot
     *            -- Slot where this instance has to be.
     * @param rac_count
     *            -- Rac count is the numeber of RAC's
     * @param rac_size
     *            -- number of memberships in the rac
     * @param region
     *            -- name of the DC where it this token is created.
     */
    @Override
    public String createToken(int my_slot, int rac_count, int rac_size, String region)
    {
        int regionCount = rac_count * rac_size;
        return initialToken(regionCount, my_slot, regionOffset(region)).toString();
    }
    @Override
    public String createToken(int my_slot, int totalCount, String region)
    {
        return initialToken(totalCount, my_slot, regionOffset(region)).toString();
    }
    /**
     * Binary-searches the sorted copy of tokenList; on a miss, returns
     * whichever neighbor is numerically closer to tokenToSearch.
     */
    @Override
    public BigInteger findClosestToken(BigInteger tokenToSearch, List<BigInteger> tokenList)
    {
        Preconditions.checkArgument(!tokenList.isEmpty(), "token list must not be empty");
        List<BigInteger> sortedTokens = Ordering.natural().sortedCopy(tokenList);
        int index = Ordering.natural().binarySearch(sortedTokens, tokenToSearch);
        if (index < 0)
        {
            // Negative result encodes the insertion point; compare the two
            // neighbors and pick the closer one.
            int i = Math.abs(index) - 1;
            if ((i >= sortedTokens.size()) || (i > 0 && sortedTokens.get(i).subtract(tokenToSearch)
                    .compareTo(tokenToSearch.subtract(sortedTokens.get(i - 1))) > 0))
                --i;
            return sortedTokens.get(i);
        }
        return sortedTokens.get(index);
    }
    /**
     * Due to warm bootstrap feature, we make region offset to be the same for all DCs
     * and will support different offsets later
     */
    @Override
    public int regionOffset(String dataCenter)
    {
        // Deliberately ignores dataCenter: every DC uses the fixed offset
        // derived from the reversed string "Dynomite".
        return Math.abs(reverse("Dynomite").hashCode());
        //return Math.abs(reverse(dataCenter).hashCode());
    }
    /** Returns the input reversed character-by-character; null passes through. */
    private String reverse(String s)
    {
        if (s == null)
            return null;
        StringBuilder sb = new StringBuilder();
        for(int i=s.length()-1; i>=0; i--) {
            sb.append(s.charAt(i));
        }
        return sb.toString();
    }
}
| 3,142 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/Sleeper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import com.google.inject.ImplementedBy;
/**
* An abstraction to {@link Thread#sleep(long)} so we can mock it in tests.
*/
@ImplementedBy(ThreadSleeper.class)
public interface Sleeper
{
    /** Sleeps for the given duration, propagating interruption. */
    void sleep(long waitTimeMs) throws InterruptedException;
    /** Sleeps for the given duration, silently swallowing interruption. */
    void sleepQuietly(long waitTimeMs);
}
| 3,143 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/ThreadSleeper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
/**
* Sleeper impl that delegates to Thread.sleep
*/
public class ThreadSleeper implements Sleeper
{
@Override
public void sleep(long waitTimeMs) throws InterruptedException
{
Thread.sleep(waitTimeMs);
}
public void sleepQuietly(long waitTimeMs)
{
try
{
sleep(waitTimeMs);
}
catch (InterruptedException e)
{
//no-op
}
}
} | 3,144 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/RetryableCallable.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
/**
 * A {@link Callable} that retries {@link #retriableCall()} with a fixed wait
 * between attempts, rethrowing the last failure once the retry budget is spent.
 * {@link CancellationException} is never retried.
 */
public abstract class RetryableCallable<T> implements Callable<T>
{
    private static final Logger logger = LoggerFactory.getLogger(RetryableCallable.class);
    public static final int DEFAULT_NUMBER_OF_RETRIES = 15;
    public static final long DEFAULT_WAIT_TIME = 100;
    private int retrys;
    private long waitTime;
    public RetryableCallable()
    {
        this(DEFAULT_NUMBER_OF_RETRIES, DEFAULT_WAIT_TIME);
    }
    public RetryableCallable(int retrys, long waitTime)
    {
        set(retrys, waitTime);
    }
    /** Reconfigures the retry count and the wait (ms) between attempts. */
    public void set(int retrys, long waitTime)
    {
        this.retrys = retrys;
        this.waitTime = waitTime;
    }
    /** The single attempt to be retried; implemented by subclasses. */
    public abstract T retriableCall() throws Exception;
    public T call() throws Exception
    {
        int retry = 0;
        int logCounter = 0;
        while (true)
        {
            try
            {
                return retriableCall();
            }
            catch (CancellationException e)
            {
                throw e;
            }
            catch (Exception e)
            {
                retry++;
                // NOTE(review): the equality test means retrys <= 0 never matches
                // and the loop retries indefinitely — confirm that is intended.
                if (retry == retrys)
                {
                    throw e;
                }
                logger.error(String.format("Retry #%d for: %s",retry, e.getMessage()));
                // Full stack trace is logged only for the first failure.
                if(++logCounter == 1)
                    logger.error("Exception --> "+ExceptionUtils.getFullStackTrace(e));
                Thread.sleep(waitTime);
            }
            finally
            {
                forEachExecution();
            }
        }
    }
    /** Hook invoked after every attempt (success or failure). */
    public void forEachExecution()
    {
        // do nothing by default.
    }
}
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/ProcessTuner.java | package com.netflix.nfsidecar.utils;
import java.io.IOException;
/** Writes a managed process's configuration (e.g. a YAML file) to disk. */
public interface ProcessTuner
{
    /**
     * Writes all configuration properties to the given file location.
     *
     * @param yamlLocation path of the configuration file to write
     * @throws Exception on any failure producing or writing the configuration
     */
    void writeAllProperties(String yamlLocation) throws Exception;
}
| 3,146 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/ExponentialRetryCallable.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import java.util.concurrent.CancellationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class ExponentialRetryCallable<T> extends RetryableCallable<T>
{
public final static long MAX_SLEEP = 240000;
public final static long MIN_SLEEP = 200;
private static final Logger logger = LoggerFactory.getLogger(RetryableCallable.class);
private long max;
private long min;
public ExponentialRetryCallable()
{
this.max = MAX_SLEEP;
this.min = MIN_SLEEP;
}
public ExponentialRetryCallable(long minSleep, long maxSleep)
{
this.max = maxSleep;
this.min = minSleep;
}
public T call() throws Exception
{
long delay = min;// ms
while (true)
{
try
{
return retriableCall();
}
catch (CancellationException e)
{
throw e;
}
catch (Exception e)
{
delay *= 2;
if (delay > max)
{
throw e;
}
logger.error(e.getMessage());
Thread.sleep(delay);
}
finally
{
forEachExecution();
}
}
}
}
| 3,147 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/utils/BoundedExponentialRetryCallable.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.utils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.CancellationException;
public abstract class BoundedExponentialRetryCallable<T> extends RetryableCallable<T>
{
public final static long MAX_SLEEP = 10000;
public final static long MIN_SLEEP = 1000;
public final static int MAX_RETRIES = 10;
private static final Logger logger = LoggerFactory.getLogger(BoundedExponentialRetryCallable.class);
private long max;
private long min;
private int maxRetries;
private final ThreadSleeper sleeper = new ThreadSleeper();
public BoundedExponentialRetryCallable()
{
this.max = MAX_SLEEP;
this.min = MIN_SLEEP;
this.maxRetries = MAX_RETRIES;
}
public BoundedExponentialRetryCallable(long minSleep, long maxSleep, int maxNumRetries)
{
this.max = maxSleep;
this.min = minSleep;
this.maxRetries = maxNumRetries;
}
public T call() throws Exception {
long delay = min;// ms
int retry = 0;
while (true) {
try {
return retriableCall();
} catch (CancellationException e) {
throw e;
} catch (Exception e) {
retry++;
if (delay < max && retry <= maxRetries) {
delay *= 2;
logger.error(String.format("Retry #%d for: %s", retry, e.getMessage()));
sleeper.sleep(delay);
} else if (delay >= max && retry <= maxRetries) {
logger.error(String.format("Retry #%d for: %s", retry, ExceptionUtils.getFullStackTrace(e)));
sleeper.sleep(max);
} else {
logger.info("Exception --> " + ExceptionUtils.getFullStackTrace(e));
throw e;
}
} finally {
forEachExecution();
}
}
}
public void setMax(long max) {
this.max = max;
}
public void setMin(long min) {
this.min = min;
}
public void setMaxRetries(int maxRetries) {
this.maxRetries = maxRetries;
}
}
| 3,148 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/supplier/HostSupplier.java | package com.netflix.nfsidecar.supplier;
import java.util.List;
import com.google.common.base.Supplier;
import com.netflix.astyanax.connectionpool.Host;
/**
 * Source of the Cassandra hosts that store the Dynomite topology for a given
 * cluster.
 */
public interface HostSupplier {
    /**
     * Returns a lazy supplier of the Cassandra hosts backing the named cluster.
     *
     * @param clusterName name of the cluster whose hosts are requested
     * @return a Supplier that yields the list of hosts each time it is invoked
     */
    public Supplier<List<Host>> getSupplier(String clusterName);
}
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/supplier/LocalHostSupplier.java | package com.netflix.nfsidecar.supplier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import com.google.common.base.Supplier;
import com.google.inject.Inject;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.nfsidecar.config.CassCommonConfig;
/**
* Use the {@code DM_CASSANDRA_CLUSTER_SEEDS} environment variable to provide a
* list of Cassandra hosts that contain the complete Dynomite topology.
*/
/**
 * Host supplier backed by the {@code DM_CASSANDRA_CLUSTER_SEEDS} environment
 * variable, which must list one or more Cassandra hosts holding the complete
 * Dynomite topology.
 */
public class LocalHostSupplier implements HostSupplier {

    private static final String errMsg = "DM_CASSANDRA_CLUSTER_SEEDS cannot be empty. It must contain one or more Cassandra hosts.";

    private CassCommonConfig config;

    @Inject
    public LocalHostSupplier(CassCommonConfig config) {
        this.config = config;
    }

    @Override
    public Supplier<List<Host>> getSupplier(String clusterName) {
        final List<Host> hosts = new ArrayList<Host>();

        if (config.getCassandraClusterName().equals(clusterName)) {
            // The seed list comes straight from the environment, comma separated.
            String seeds = System.getenv("DM_CASSANDRA_CLUSTER_SEEDS");
            if (seeds == null || "".equals(seeds))
                throw new RuntimeException(errMsg);

            String[] seedHosts = StringUtils.split(seeds, ",");
            if (seedHosts.length == 0)
                throw new RuntimeException(errMsg);

            for (String seedHost : seedHosts) {
                hosts.add(new Host(seedHost, 9160));
            }
        } else {
            // Unknown cluster: fall back to a single local node.
            hosts.add(new Host("127.0.0.1", 9160).setRack("localdc"));
        }

        return new Supplier<List<Host>>() {
            @Override
            public List<Host> get() {
                return hosts;
            }
        };
    }
}
| 3,150 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/supplier/EurekaHostSupplier.java | package com.netflix.nfsidecar.supplier;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.shared.Application;
/**
* Use Eureka to provide a list of Cassandra hosts that contain the complete
* Dynomite topology.
*
 * EurekaHostSupplier provides a {@code Supplier<List<Host>>} via the
 * {@link DiscoveryClient} (i.e. the Eureka client).
*
* Note that the class needs the Eureka application name to discover all
* instances for that application.
*/
@Singleton
public class EurekaHostSupplier implements HostSupplier {
    private static final Logger LOG = LoggerFactory.getLogger(EurekaHostSupplier.class);
    // Eureka client used to look up application instances
    private final DiscoveryClient discoveryClient;
    @Inject
    public EurekaHostSupplier(DiscoveryClient discoveryClient) {
        this.discoveryClient = discoveryClient;
    }
    /**
     * Get a list of Cassandra hosts that contain the complete Dynomite
     * topology.
     *
     * @param clusterName
     *            name of the Dynomite cluster
     * @return a Supplier that returns a list of Cassandra hosts
     */
    @Override
    public Supplier<List<Host>> getSupplier(final String clusterName) {
        return new Supplier<List<Host>>() {
            @Override
            public List<Host> get() {
                if (discoveryClient == null) {
                    LOG.error("Eureka DiscoveryClient cannot be null");
                    throw new RuntimeException("EurekaHostsSupplier needs a non-null DiscoveryClient");
                }
                LOG.debug("Fetching instance list for app: " + clusterName);
                // Eureka registers applications by upper-cased name.
                Application app = discoveryClient.getApplication(clusterName.toUpperCase());
                List<Host> hosts = new ArrayList<Host>();
                if (app == null) {
                    LOG.warn("Cluster '{}' not found in Eureka", clusterName);
                    return hosts;
                }
                List<InstanceInfo> ins = app.getInstances();
                if (ins == null || ins.isEmpty()) {
                    LOG.warn("Cluster '{}' found in Eureka but has no instances", clusterName);
                    return hosts;
                }
                // Keep only instances reported UP, then map each InstanceInfo to a Host.
                hosts = Lists
                        .newArrayList(Collections2.transform(Collections2.filter(ins, new Predicate<InstanceInfo>() {
                            @Override
                            public boolean apply(InstanceInfo input) {
                                return input.getStatus() == InstanceInfo.InstanceStatus.UP;
                            }
                        }), new Function<InstanceInfo, Host>() {
                            @Override
                            public Host apply(InstanceInfo info) {
                                // NOTE(review): assumes the first hostname label has the form
                                // "name-a-b-c-d" where a.b.c.d encodes an IP address; parts[1..4]
                                // throws ArrayIndexOutOfBoundsException for other naming schemes
                                // -- confirm against the deployment's hostname convention.
                                String[] parts = StringUtils.split(StringUtils.split(info.getHostName(), ".")[0], '-');
                                Host host = new Host(info.getHostName(), info.getPort())
                                        .addAlternateIpAddress(StringUtils
                                                .join(new String[] { parts[1], parts[2], parts[3], parts[4] }, "."))
                                        .addAlternateIpAddress(info.getIPAddr()).setId(info.getId());
                                try {
                                    // On AWS, derive the rack from the availability zone.
                                    if (info.getDataCenterInfo() instanceof AmazonInfo) {
                                        AmazonInfo amazonInfo = (AmazonInfo) info.getDataCenterInfo();
                                        host.setRack(amazonInfo.get(MetaDataKey.availabilityZone));
                                    }
                                } catch (Throwable t) {
                                    // Rack is best-effort; a failure here must not drop the host.
                                    LOG.error("Error getting rack for host " + host.getName(), t);
                                }
                                return host;
                            }
                        }));
                LOG.debug("Found hosts in Eureka. Num hosts: " + hosts.size());
                return hosts;
            }
        };
    }
}
| 3,151 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/instance/AwsInstanceDataRetriever.java | package com.netflix.nfsidecar.instance;
import com.netflix.nfsidecar.utils.SystemUtils;
/**
* Calls AWS ec2 metadata to get info on the location of the running instance.
*
*/
/**
 * Retrieves placement and identity details for the running instance from the
 * AWS EC2 metadata service (classic, non-VPC environment).
 */
public class AwsInstanceDataRetriever implements InstanceDataRetriever {

    /** Base URL of the EC2 instance metadata service. */
    private static final String META_DATA_URL = "http://169.254.169.254/latest/meta-data/";

    public String getRac() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "placement/availability-zone");
    }

    public String getPublicHostname() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "public-hostname");
    }

    public String getPublicIP() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "public-ipv4");
    }

    public String getInstanceId() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "instance-id");
    }

    public String getInstanceType() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "instance-type");
    }

    @Override
    /*
     * @return id of the network interface for running instance
     */
    public String getMac() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "network/interfaces/macs/").trim();
    }

    @Override
    public String getVpcId() {
        throw new UnsupportedOperationException("Not applicable as running instance is in classic environment");
    }
}
| 3,152 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/instance/VpcInstanceDataRetriever.java | package com.netflix.nfsidecar.instance;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.nfsidecar.utils.SystemUtils;
/**
* Calls AWS ec2 metadata to get info on the location of the running instance in
* VPC. Public Hostname will return local-hostname Public IP will return
* local-ipv4
*/
/**
 * Retrieves placement and identity details for a running instance inside a
 * VPC from the AWS EC2 metadata service. Public hostname/IP resolve via the
 * same metadata endpoints as the classic retriever.
 */
public class VpcInstanceDataRetriever implements InstanceDataRetriever {

    private static final Logger logger = LoggerFactory.getLogger(VpcInstanceDataRetriever.class);

    /** Base URL of the EC2 instance metadata service. */
    private static final String META_DATA_URL = "http://169.254.169.254/latest/meta-data/";

    public String getRac() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "placement/availability-zone");
    }

    public String getPublicHostname() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "public-hostname");
    }

    public String getPublicIP() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "public-ipv4");
    }

    public String getInstanceId() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "instance-id");
    }

    public String getInstanceType() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "instance-type");
    }

    @Override
    /*
     * @return id of the network interface for running instance
     */
    public String getMac() {
        return SystemUtils.getDataFromUrl(META_DATA_URL + "network/interfaces/macs/").trim();
    }

    @Override
    /*
     * @return the id of the vpc account for running instance, null if it does
     * not exist.
     */
    public String getVpcId() {
        String macId = getMac();
        if (macId == null || macId.isEmpty())
            return null;

        try {
            // The mac listing entry already ends with "/", so "vpc-id" is
            // appended directly to form the per-interface metadata path.
            return SystemUtils
                    .getDataFromUrl(META_DATA_URL + "network/interfaces/macs/" + macId + "vpc-id")
                    .trim();
        } catch (Exception e) {
            logger.info(
                    "Vpc id does not exist for running instance, not fatal as running instance maybe not be in vpc. Msg: "
                            + e.getLocalizedMessage());
            return null;
        }
    }
}
| 3,153 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/instance/InstanceDataRetriever.java | package com.netflix.nfsidecar.instance;
/**
 * Abstraction over the source of placement, identity, and networking metadata
 * for the running instance (e.g. EC2 metadata service or system properties).
 */
public interface InstanceDataRetriever
{
    /** @return availability zone / rack of the running instance */
    String getRac();
    /** @return publicly resolvable hostname of the running instance */
    String getPublicHostname();
    /** @return public IP address of the running instance */
    String getPublicIP();
    /** @return unique id of the running instance */
    String getInstanceId();
    /** @return hardware type of the running instance */
    String getInstanceType();
    String getMac(); //fetch id of the network interface for running instance
    String getVpcId(); //the id of the vpc for running instance
}
| 3,154 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/instance/LocalInstanceDataRetriever.java | package com.netflix.nfsidecar.instance;
/**
* Looks at local (system) properties for metadata about the running 'instance'.
* Typically, this is used for locally-deployed testing.
*
* @author jason brown
*/
/**
 * Reads 'instance' metadata from JVM system properties; intended for
 * locally-deployed testing rather than a real cloud environment.
 *
 * @author jason brown
 */
public class LocalInstanceDataRetriever implements InstanceDataRetriever
{
    private static final String PREFIX = "florida.localInstance.";

    public String getRac()
    {
        return localProperty("availabilityZone");
    }

    public String getPublicHostname()
    {
        return localProperty("publicHostname");
    }

    public String getPublicIP()
    {
        return localProperty("publicIp");
    }

    public String getInstanceId()
    {
        return localProperty("instanceId");
    }

    public String getInstanceType()
    {
        return localProperty("instanceType");
    }

    public String getMac()
    {
        return localProperty("instanceMac");
    }

    @Override
    public String getVpcId() {
        throw new UnsupportedOperationException("Not applicable as running instance is in classic environment");
    }

    /** Looks up a prefixed system property, defaulting to the empty string. */
    private String localProperty(String name)
    {
        return System.getProperty(PREFIX + name, "");
    }
}
| 3,155 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/aws/ClearCredential.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.aws;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
/**
* This is a basic implementation of ICredentials. User should prefer to
* implement their own versions for more secured access. This class requires
* clear AWS key and access.
*
 * Set the following properties in "/etc/awscredential.properties"
*
*/
/**
 * Basic {@link ICredential} implementation that reads a clear-text AWS access
 * id ({@code AWSACCESSID}) and secret key ({@code AWSKEY}) from
 * {@code /etc/awscredential.properties}. Users should prefer their own, more
 * secure implementations.
 */
public class ClearCredential implements ICredential {
    private static final Logger logger = LoggerFactory.getLogger(ClearCredential.class);
    private static final String CRED_FILE = "/etc/awscredential.properties";
    private final Properties props;
    private final String AWS_ACCESS_ID;
    private final String AWS_KEY;

    /**
     * Loads the credential file eagerly.
     *
     * @throws RuntimeException if the file cannot be read; the process cannot
     *             start without credentials.
     */
    public ClearCredential() {
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(CRED_FILE);
            props = new Properties();
            props.load(fis);
            // Missing keys degrade to empty strings rather than nulls.
            AWS_ACCESS_ID = props.getProperty("AWSACCESSID") != null ? props.getProperty("AWSACCESSID").trim() : "";
            AWS_KEY = props.getProperty("AWSKEY") != null ? props.getProperty("AWSKEY").trim() : "";
        } catch (Exception e) {
            logger.error("Exception with credential file ", e);
            throw new RuntimeException("Problem reading credential file. Cannot start.", e);
        } finally {
            // Guard against fis being null when the FileInputStream constructor
            // itself failed; previously this threw an NPE from the finally block
            // that masked the informative RuntimeException above.
            if (fis != null) {
                try {
                    fis.close();
                } catch (IOException e) {
                    // Closing is best-effort; log instead of printStackTrace().
                    logger.warn("Failed to close credential file " + CRED_FILE, e);
                }
            }
        }
    }

    /** @return the configured AWS access key id (may be empty). */
    public String getAccessKeyId() {
        return AWS_ACCESS_ID;
    }

    /** @return the configured AWS secret key (may be empty). */
    public String getSecretAccessKey() {
        return AWS_KEY;
    }

    /** @return the id/key pair wrapped as SDK credentials. */
    public AWSCredentials getCredentials() {
        return new BasicAWSCredentials(getAccessKeyId(), getSecretAccessKey());
    }

    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        return new AWSCredentialsProvider() {
            public AWSCredentials getCredentials() {
                return ClearCredential.this.getCredentials();
            }

            @Override
            public void refresh() {
                // NOP: credentials are loaded once at construction time.
            }
        };
    }
}
| 3,156 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/aws/AWSMembership.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.aws;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsResult;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.autoscaling.model.UpdateAutoScalingGroupRequest;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest;
import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest;
import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult;
import com.amazonaws.services.ec2.model.Filter;
import com.amazonaws.services.ec2.model.IpPermission;
import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest;
import com.amazonaws.services.ec2.model.SecurityGroup;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.nfsidecar.identity.IMembership;
import com.netflix.nfsidecar.identity.InstanceEnvIdentity;
import com.netflix.nfsidecar.instance.InstanceDataRetriever;
import com.netflix.nfsidecar.resources.env.IEnvVariables;
/**
* Class to query amazon ASG for its members to provide - Number of valid nodes
* in the ASG - Number of zones - Methods for adding ACLs for the nodes
*/
public class AWSMembership implements IMembership {
private static final Logger logger = LoggerFactory.getLogger(AWSMembership.class);
private final ICredential provider;
private final ICredential crossAccountProvider;
private final InstanceEnvIdentity insEnvIdentity;
private final InstanceDataRetriever retriever;
private final IEnvVariables envVariables;
@Inject
public AWSMembership(ICredential provider, @Named("awsroleassumption") ICredential crossAccountProvider,
InstanceEnvIdentity insEnvIdentity, InstanceDataRetriever retriever, IEnvVariables envVariables) {
this.provider = provider;
this.crossAccountProvider = crossAccountProvider;
this.insEnvIdentity = insEnvIdentity;
this.retriever = retriever;
this.envVariables = envVariables;
}
@Override
public List<String> getRacMembership() {
AmazonAutoScaling client = null;
try {
client = getAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(envVariables.getRack());
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
List<String> instanceIds = Lists.newArrayList();
for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
for (Instance ins : asg.getInstances())
if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating")
|| ins.getLifecycleState().equalsIgnoreCase("shutting-down")
|| ins.getLifecycleState().equalsIgnoreCase("Terminated")))
instanceIds.add(ins.getInstanceId());
}
logger.info(String.format("Querying Amazon returned following instance in the ASG: %s --> %s",
envVariables.getRack(), StringUtils.join(instanceIds, ",")));
return instanceIds;
} finally {
if (client != null)
client.shutdown();
}
}
@Override
public List<String> getCrossAccountRacMembership() {
AmazonAutoScaling client = null;
try {
client = getCrossAccountAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(envVariables.getRack());
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
List<String> instanceIds = Lists.newArrayList();
for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
for (Instance ins : asg.getInstances())
if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating")
|| ins.getLifecycleState().equalsIgnoreCase("shutting-down")
|| ins.getLifecycleState().equalsIgnoreCase("Terminated")))
instanceIds.add(ins.getInstanceId());
}
logger.info(String.format("Querying Amazon returned following instance in the cross-account ASG: %s --> %s",
envVariables.getRack(), StringUtils.join(instanceIds, ",")));
return instanceIds;
} finally {
if (client != null)
client.shutdown();
}
}
/**
* Actual membership AWS source of truth...
*/
@Override
public int getRacMembershipSize() {
AmazonAutoScaling client = null;
try {
client = getAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(envVariables.getRack());
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
int size = 0;
for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
size += asg.getMaxSize();
}
logger.info(String.format("Query on ASG returning %d instances", size));
return size;
} finally {
if (client != null)
client.shutdown();
}
}
/**
* Cross-account member of AWS
*/
@Override
public int getCrossAccountRacMembershipSize() {
AmazonAutoScaling client = null;
try {
client = getCrossAccountAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(envVariables.getRack());
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
int size = 0;
for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
size += asg.getMaxSize();
}
logger.info(String.format("Query on cross account ASG returning %d instances", size));
return size;
} finally {
if (client != null)
client.shutdown();
}
}
/**
* Adding peers' IPs as ingress to the running instance SG. The running
* instance could be in "classic" or "vpc"
*/
public void addACL(Collection<String> listIPs, int from, int to) {
AmazonEC2 client = null;
try {
client = getEc2Client();
List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
ipPermissions.add(
new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));
if (this.insEnvIdentity.isClassic()) {
client.authorizeSecurityGroupIngress(
new AuthorizeSecurityGroupIngressRequest(envVariables.getDynomiteClusterName(), ipPermissions));
logger.info("Done adding ACL to classic: " + StringUtils.join(listIPs, ","));
} else {
AuthorizeSecurityGroupIngressRequest sgIngressRequest = new AuthorizeSecurityGroupIngressRequest();
// fetch SG group id for VPC account of the running instances
sgIngressRequest.withGroupId(getVpcGroupId());
// Add peer's IPs as ingress to the SG that the running instance
// belongs to
client.authorizeSecurityGroupIngress(sgIngressRequest.withIpPermissions(ipPermissions));
logger.info("Done adding ACL to vpc: " + StringUtils.join(listIPs, ","));
}
} finally {
if (client != null)
client.shutdown();
}
}
/*
* @return SG group id for a group name, vpc account of the running
* instance. ACLGroupName = Cluster name and VPC-ID is the VPC We need both
* filters to find the SG for that cluster and that vpc-id
*/
protected String getVpcGroupId() {
AmazonEC2 client = null;
try {
client = getEc2Client();
Filter nameFilter = new Filter().withName("group-name").withValues(envVariables.getDynomiteClusterName()); // SG
Filter vpcFilter = new Filter().withName("vpc-id").withValues(retriever.getVpcId());
logger.info("Dynomite name: " + envVariables.getDynomiteClusterName());
DescribeSecurityGroupsRequest req = new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter);
DescribeSecurityGroupsResult result = client.describeSecurityGroups(req);
for (SecurityGroup group : result.getSecurityGroups()) {
logger.debug(String.format("got group-id:%s for group-name:%s,vpc-id:%s", group.getGroupId(),
envVariables.getDynomiteClusterName(), retriever.getVpcId()));
return group.getGroupId();
}
logger.error(String.format("unable to get group-id for group-name=%s vpc-id=%s",
envVariables.getDynomiteClusterName(), retriever.getVpcId()));
return "";
} finally {
if (client != null)
client.shutdown();
}
}
/**
* removes a iplist from the SG
*/
public void removeACL(Collection<String> listIPs, int from, int to) {
AmazonEC2 client = null;
try {
client = getEc2Client();
List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
ipPermissions.add(
new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));
if (this.insEnvIdentity.isClassic()) {
client.revokeSecurityGroupIngress(
new RevokeSecurityGroupIngressRequest(envVariables.getDynomiteClusterName(), ipPermissions));
logger.info("Done removing from ACL within classic env for running instance: "
+ StringUtils.join(listIPs, ","));
} else {
RevokeSecurityGroupIngressRequest req = new RevokeSecurityGroupIngressRequest();
req.withGroupId(getVpcGroupId()); // fetch SG group id for vpc
// account of the running
// instance.
// Adding Peer's IPs as ingress to the running instances
client.revokeSecurityGroupIngress(req.withIpPermissions(ipPermissions));
logger.info("Done removing from ACL within vpc env for running instance: "
+ StringUtils.join(listIPs, ","));
}
} finally {
if (client != null)
client.shutdown();
}
}
/**
* List SG ACL's
*/
public List<String> listACL(int from, int to) {
AmazonEC2 client = null;
try {
client = getEc2Client();
List<String> ipPermissions = new ArrayList<String>();
Filter nameFilter = new Filter().withName("group-name").withValues(envVariables.getDynomiteClusterName());
String vpcid = retriever.getVpcId();
if (vpcid == null || vpcid.isEmpty()) {
throw new IllegalStateException("vpcid is null even though instance is running in vpc.");
}
Filter vpcFilter = new Filter().withName("vpc-id").withValues(vpcid);
DescribeSecurityGroupsRequest req = new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter);
DescribeSecurityGroupsResult result = client.describeSecurityGroups(req);
for (SecurityGroup group : result.getSecurityGroups())
for (IpPermission perm : group.getIpPermissions())
if (perm.getFromPort() == from && perm.getToPort() == to)
ipPermissions.addAll(perm.getIpRanges());
logger.info("Fetch current permissions for vpc env of running instance");
return ipPermissions;
} finally {
if (client != null)
client.shutdown();
}
}
@Override
public void expandRacMembership(int count) {
AmazonAutoScaling client = null;
try {
client = getAutoScalingClient();
DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest()
.withAutoScalingGroupNames(envVariables.getRack());
DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
AutoScalingGroup asg = res.getAutoScalingGroups().get(0);
UpdateAutoScalingGroupRequest ureq = new UpdateAutoScalingGroupRequest();
ureq.setAutoScalingGroupName(asg.getAutoScalingGroupName());
ureq.setMinSize(asg.getMinSize() + 1);
ureq.setMaxSize(asg.getMinSize() + 1);
ureq.setDesiredCapacity(asg.getMinSize() + 1);
client.updateAutoScalingGroup(ureq);
} finally {
if (client != null)
client.shutdown();
}
}
protected AmazonAutoScaling getAutoScalingClient() {
AmazonAutoScaling client = new AmazonAutoScalingClient(provider.getAwsCredentialProvider());
client.setEndpoint("autoscaling." + envVariables.getRegion() + ".amazonaws.com");
return client;
}
protected AmazonAutoScaling getCrossAccountAutoScalingClient() {
AmazonAutoScaling client = new AmazonAutoScalingClient(crossAccountProvider.getAwsCredentialProvider());
client.setEndpoint("autoscaling." + envVariables.getRegion() + ".amazonaws.com");
return client;
}
protected AmazonEC2 getEc2Client() {
AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider());
client.setEndpoint("ec2." + envVariables.getRegion() + ".amazonaws.com");
return client;
}
protected AmazonEC2 getCrossAccountEc2Client() {
AmazonEC2 client = new AmazonEC2Client(crossAccountProvider.getAwsCredentialProvider());
client.setEndpoint("ec2." + envVariables.getRegion() + ".amazonaws.com");
return client;
}
} | 3,157 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/aws/UpdateSecuritySettings.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.aws;
import java.util.List;
import java.util.Random;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.identity.AppsInstance;
import com.netflix.nfsidecar.identity.IMembership;
import com.netflix.nfsidecar.identity.InstanceIdentity;
import com.netflix.nfsidecar.resources.env.IEnvVariables;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.nfsidecar.tokensdb.IAppsInstanceFactory;
@Singleton
/**
 * Periodic task that reconciles the security-group ACLs with the current set
 * of cluster instances in the token store: IPs of known instances are added,
 * and ACL entries for departed instances are revoked.
 */
public class UpdateSecuritySettings extends Task {
    public static final String JOBNAME = "Update_SG";
    // Flipped to true once any ACL change has been applied; read elsewhere to
    // gate startup ordering.
    public static boolean firstTimeUpdated = false;

    private static final Random ran = new Random();
    private final IMembership membership;
    private final IAppsInstanceFactory factory;
    private final IEnvVariables envVariables;
    private final CommonConfig config;

    @Inject
    public UpdateSecuritySettings(CommonConfig config, IMembership membership, IAppsInstanceFactory factory,
            IEnvVariables envVariables) {
        this.config = config;
        this.membership = membership;
        this.factory = factory;
        this.envVariables = envVariables;
    }

    /**
     * Reconciles the SG ACL entries (one "/32" range per instance) on the
     * Dynomite peer port with the instance registry.
     */
    @Override
    public void execute() {
        // if seed dont execute.
        int port = config.getDynomitePeerPort();
        List<String> acls = membership.listACL(port, port);
        // Fetch the registry once and reuse it for both the add and remove
        // passes (previously it was queried a second time for the add loop).
        List<AppsInstance> instances = factory.getAllIds(envVariables.getDynomiteClusterName());

        // iterate to add...
        List<String> add = Lists.newArrayList();
        for (AppsInstance instance : instances) {
            String range = instance.getHostIP() + "/32";
            if (!acls.contains(range))
                add.add(range);
        }
        if (add.size() > 0) {
            membership.addACL(add, port, port);
            firstTimeUpdated = true;
        }

        // just iterate to generate ranges.
        List<String> currentRanges = Lists.newArrayList();
        for (AppsInstance instance : instances) {
            String range = instance.getHostIP() + "/32";
            currentRanges.add(range);
        }

        // iterate to remove...
        List<String> remove = Lists.newArrayList();
        for (String acl : acls)
            if (!currentRanges.contains(acl)) // if not found then remove....
                remove.add(acl);
        if (remove.size() > 0) {
            membership.removeACL(remove, port, port);
            firstTimeUpdated = true;
        }
    }

    /**
     * Seeds run the task periodically (with jitter to avoid synchronized API
     * bursts); non-seeds get a one-shot timer.
     */
    public static TaskTimer getTimer(InstanceIdentity id) {
        SimpleTimer return_;
        if (id.isSeed())
            return_ = new SimpleTimer(JOBNAME, 120 * 1000 + ran.nextInt(120 * 1000));
        else
            return_ = new SimpleTimer(JOBNAME);
        return return_;
    }

    @Override
    public String getName() {
        return JOBNAME;
    }
}
| 3,158 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/aws/AwsInstanceEnvIdentity.java | package com.netflix.nfsidecar.aws;
import com.netflix.nfsidecar.identity.InstanceEnvIdentity;
import com.netflix.nfsidecar.instance.InstanceDataRetriever;
import com.netflix.nfsidecar.instance.VpcInstanceDataRetriever;
/**
 * Determines whether the running instance is in EC2-Classic, a default VPC
 * account, or a non-default VPC account.
 */
public class AwsInstanceEnvIdentity implements InstanceEnvIdentity {

    // Exactly one of these flags becomes true, decided once at construction.
    private Boolean isClassic = false;
    private Boolean isDefaultVpc = false;
    private Boolean isNonDefaultVpc = false;

    public AwsInstanceEnvIdentity() {
        String currentVpc = getVpcId();
        boolean insideVpc = currentVpc != null && !currentVpc.isEmpty();
        if (insideVpc) {
            // Our instances run under a non-default ("persistence_*") AWS account,
            // so a VPC id implies the non-default-VPC environment.
            this.isNonDefaultVpc = true;
        } else {
            this.isClassic = true;
        }
    }

    /**
     * @return the VPC id of the running instance, or {@code null} when the
     *         instance is not running inside a VPC
     */
    private String getVpcId() {
        InstanceDataRetriever retriever = new VpcInstanceDataRetriever();
        return retriever.getVpcId();
    }

    @Override
    public Boolean isClassic() {
        return this.isClassic;
    }

    @Override
    public Boolean isDefaultVpc() {
        return this.isDefaultVpc;
    }

    @Override
    public Boolean isNonDefaultVpc() {
        return this.isNonDefaultVpc;
    }
}
| 3,159 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/aws/ICredential.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.aws;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.google.inject.ImplementedBy;
/**
* Credential file interface for services supporting
* Access ID and key authentication
*/
@ImplementedBy(ClearCredential.class)
public interface ICredential
{
    /**
     * @return the AWS credentials provider used to authenticate AWS SDK clients
     */
    public AWSCredentialsProvider getAwsCredentialProvider();
}
| 3,160 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/aws/IAMCredential.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.aws;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
public class IAMCredential implements ICredential
{
private final InstanceProfileCredentialsProvider iamCredProvider;
public IAMCredential()
{
this.iamCredProvider = new InstanceProfileCredentialsProvider();
}
public AWSCredentialsProvider getAwsCredentialProvider()
{
return iamCredProvider;
}
}
| 3,161 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/backup/Backup.java | package com.netflix.nfsidecar.backup;
import java.io.File;
import org.joda.time.DateTime;
public interface Backup {
    /**
     * Upload the given file to the backup store.
     *
     * @param file       local file to upload
     * @param todayStart start of the current day — presumably used to
     *                   date-stamp the remote location; verify against
     *                   implementations (e.g. S3Backup)
     * @return true if the upload succeeded, false otherwise
     */
    boolean upload(File file, DateTime todayStart);
}
| 3,162 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/backup/Restore.java | package com.netflix.nfsidecar.backup;
public interface Restore {
    /**
     * Restore data from a previously taken backup.
     *
     * @param dateString identifies which backup to restore — presumably the
     *                   date-stamped key used by {@code Backup.upload}; verify
     *                   against implementations (e.g. S3Restore)
     * @return true if the restore succeeded, false otherwise
     */
    boolean restoreData(String dateString);
}
| 3,163 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/tokensdb/IAppsInstanceFactory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.nfsidecar.tokensdb;
import java.util.List;
import java.util.Map;
import com.netflix.nfsidecar.identity.AppsInstance;
/**
* Interface for managing Dynomite instance data. Provides functionality
* to register, update, delete or list instances from the registry
*/
public interface IAppsInstanceFactory
{
    /**
     * Return a list of all Dynomite server nodes registered.
     * @param appName the cluster name
     * @return a list of all nodes in {@code appName}
     */
    public List<AppsInstance> getAllIds(String appName);

    /**
     * Return a list of the Dynomite server nodes registered in the given region.
     * @param appName the cluster name
     * @param region the region (datacenter) to filter by
     * @return a list of nodes in {@code appName} located in {@code region}
     */
    public List<AppsInstance> getLocalDCIds(String appName, String region);

    /**
     * Return the Dynomite server node with the given {@code id}.
     * @param appName the cluster name
     * @param dc the rack/datacenter the node belongs to
     * @param id the node id
     * @return the node with the given {@code id}, or {@code null} if none found
     */
    public AppsInstance getInstance(String appName, String dc, int id);

    /**
     * Create/Register an instance of the server with its info.
     *
     * NOTE(review): implementations may interpret the trailing parameters
     * differently — CassandraInstanceFactory treats the final argument as the
     * rack and derives the datacenter from the environment. Verify before
     * relying on these names.
     *
     * @param app the cluster name
     * @param id the node id
     * @param instanceID the cloud provider's instance id
     * @param hostname the node's hostname
     * @param dynomitePort plain-text Dynomite client port
     * @param dynomiteSecurePort secure Dynomite client port
     * @param dynomiteSecureStoragePort secure storage-facing port
     * @param peerPort peer-to-peer port
     * @param ip the node's IP address
     * @param rac the rack/zone the node runs in
     * @param volumes optional map of attached volumes (may be null)
     * @param token the node's ring token
     * @param datacenter the datacenter/region of the node
     * @return the new node
     */
    public AppsInstance create(String app, int id, String instanceID, String hostname, int dynomitePort, int dynomiteSecurePort, int dynomiteSecureStoragePort, int peerPort, String ip, String rac,
            Map<String, Object> volumes, String token, String datacenter);

    /**
     * Delete the server node from the registry
     * @param inst the node to delete
     */
    public void delete(AppsInstance inst);

    /**
     * Update the details of the server node in registry
     * @param inst the node to update
     */
    public void update(AppsInstance inst);

    /**
     * Sort the list in place by instance id (ascending).
     * @param return_ the list of nodes to sort; mutated by this call
     */
    public void sort(List<AppsInstance> return_);

    /**
     * Attach volumes if required
     * @param instance the node to attach volumes to
     * @param mountPath where the volume should be mounted
     * @param device the device to attach
     */
    public void attachVolumes(AppsInstance instance, String mountPath, String device);
}
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/tokensdb/CassandraInstanceFactory.java | package com.netflix.nfsidecar.tokensdb;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.identity.AppsInstance;
import com.netflix.nfsidecar.resources.env.IEnvVariables;
/**
 * {@link IAppsInstanceFactory} that stores instance/token data in Cassandra,
 * delegating all persistence to {@link InstanceDataDAOCassandra}.
 */
@Singleton
public class CassandraInstanceFactory implements IAppsInstanceFactory
{
    private static final Logger logger = LoggerFactory.getLogger(CassandraInstanceFactory.class);

    CommonConfig config;
    InstanceDataDAOCassandra dao;
    IEnvVariables envVariables;

    @Inject
    public CassandraInstanceFactory(CommonConfig config, InstanceDataDAOCassandra dao, IEnvVariables envVariables) {
        this.config = config;
        this.dao = dao;
        this.envVariables = envVariables;
    }

    /** Return all registered nodes of {@code appName}, sorted by instance id. */
    public List<AppsInstance> getAllIds(String appName)
    {
        List<AppsInstance> return_ = new ArrayList<AppsInstance>(dao.getAllInstances(appName));
        sort(return_);
        return return_;
    }

    /** Return the nodes of {@code appName} in {@code region}, sorted by instance id. */
    public List<AppsInstance> getLocalDCIds(String appName, String region)
    {
        List<AppsInstance> return_ = new ArrayList<AppsInstance>(dao.getLocalDCInstances(appName, region));
        sort(return_);
        return return_;
    }

    /** Sort the given list in place by instance id (ascending). */
    public void sort(List<AppsInstance> return_)
    {
        Comparator<? super AppsInstance> comparator = new Comparator<AppsInstance>()
        {
            @Override
            public int compare(AppsInstance o1, AppsInstance o2)
            {
                // Integer.compare avoids boxing and is overflow-safe.
                return Integer.compare(o1.getId(), o2.getId());
            }
        };
        Collections.sort(return_, comparator);
    }

    /**
     * Register a new instance entry in Cassandra. The datacenter recorded is
     * always the local region from the environment; the trailing {@code rack}
     * parameter is stored as the node's rack.
     *
     * @return the newly registered node
     * @throws RuntimeException wrapping any persistence failure
     */
    public AppsInstance create(String app, int id, String instanceID, String hostname, int dynomitePort, int dynomiteSecurePort, int dynomiteSecureStoragePort, int peerPort, String ip, String zone, Map<String, Object> volumes, String payload, String rack)
    {
        try {
            // Never store a null volume map.
            Map<String, Object> v = (volumes == null) ? new HashMap<String, Object>() : volumes;
            AppsInstance ins = new AppsInstance();
            ins.setApp(app);
            ins.setZone(zone);
            ins.setRack(rack);
            ins.setHost(hostname);
            ins.setDynomitePort(dynomitePort);
            ins.setDynomiteSecurePort(dynomiteSecurePort);
            ins.setDynomiteSecureStoragePort(dynomiteSecureStoragePort);
            ins.setPeerPort(peerPort);
            ins.setHostIP(ip);
            ins.setId(id);
            ins.setInstanceId(instanceID);
            ins.setDatacenter(envVariables.getRegion());
            ins.setToken(payload);
            ins.setVolumes(v);

            dao.createInstanceEntry(ins);
            return ins;
        }
        catch (Exception e) {
            // Log with the full stack trace (message-only logging loses it);
            // the cause also travels with the rethrown RuntimeException.
            logger.error("Failed to create instance entry for app " + app + ", id " + id, e);
            throw new RuntimeException(e);
        }
    }

    /** Delete the node's entry from Cassandra. */
    public void delete(AppsInstance inst)
    {
        try {
            dao.deleteInstanceEntry(inst);
        }
        catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /** Update (re-write) the node's entry in Cassandra. */
    public void update(AppsInstance inst)
    {
        try {
            dao.createInstanceEntry(inst);
        }
        catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void attachVolumes(AppsInstance instance, String mountPath, String device)
    {
        // Volume management is not applicable to this backend.
        throw new UnsupportedOperationException("Volumes not supported");
    }

    @Override
    public AppsInstance getInstance(String appName, String dc, int id)
    {
        return dao.getInstance(appName, dc, id);
    }
}
| 3,165 |
0 | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar | Create_ds/dynomite-manager/dynomitemanager-common/src/main/java/com/netflix/nfsidecar/tokensdb/InstanceDataDAOCassandra.java | package com.netflix.nfsidecar.tokensdb;
import java.util.*;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.base.Supplier;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolType;
import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
import com.netflix.astyanax.model.*;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.thrift.ThriftFamilyFactory;
import com.netflix.astyanax.util.TimeUUIDUtils;
import com.netflix.nfsidecar.config.CassCommonConfig;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.identity.AppsInstance;
import com.netflix.nfsidecar.supplier.HostSupplier;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
public class InstanceDataDAOCassandra {
    private static final Logger logger = LoggerFactory.getLogger(InstanceDataDAOCassandra.class);

    // Column names of the "tokens" column family (see schema comment below).
    private String CN_ID = "Id";
    private String CN_APPID = "appId";
    private String CN_AZ = "availabilityZone";
    private String CN_DC = "datacenter";
    private String CN_INSTANCEID = "instanceId";
    private String CN_HOSTNAME = "hostname";
    private String CN_DYNOMITE_PORT = "dynomitePort";
    private String CN_DYNOMITE_SECURE_PORT = "dynomiteSecurePort";
    private String CN_DYNOMITE_SECURE_STORAGE_PORT = "dynomiteSecureStoragePort";
    private String CN_PEER_PORT = "peerPort";
    private String CN_EIP = "elasticIP";
    private String CN_TOKEN = "token";
    private String CN_LOCATION = "location";
    private String CN_VOLUME_PREFIX = "ssVolumes";
    private String CN_UPDATETIME = "updatetime";
    private String CF_NAME_TOKENS = "tokens";
    private String CF_NAME_LOCKS = "locks";

    private final Keyspace bootKeyspace;
    private final CommonConfig commonConfig;
    private final CassCommonConfig cassCommonConfig;
    private final HostSupplier hostSupplier;
    private final String BOOT_CLUSTER;
    private final String KS_NAME;
    private final int thriftPortForAstyanax;
    private final AstyanaxContext<Keyspace> ctx;

    // Process-local cache of the token registry, guarded by readWriteLock.
    // Initialized to an empty set so the null-check-free isEmpty() calls in
    // getAllInstances() are always safe.
    private long lastTimeCassandraPull = 0;
    private Set<AppsInstance> appInstances = new HashSet<AppsInstance>();
    private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
    private final Lock read = readWriteLock.readLock();
    private final Lock write = readWriteLock.writeLock();

    /*
     * Schema: create column family tokens with comparator=UTF8Type and
     * column_metadata=[ {column_name: appId, validation_class:
     * UTF8Type,index_type: KEYS}, {column_name: instanceId, validation_class:
     * UTF8Type}, {column_name: token, validation_class: UTF8Type},
     * {column_name: availabilityZone, validation_class: UTF8Type},
     * {column_name: hostname, validation_class: UTF8Type},{column_name: Id,
     * validation_class: UTF8Type}, {column_name: elasticIP, validation_class:
     * UTF8Type}, {column_name: updatetime, validation_class: TimeUUIDType},
     * {column_name: location, validation_class: UTF8Type}];
     */
    public ColumnFamily<String, String> CF_TOKENS = new ColumnFamily<String, String>(CF_NAME_TOKENS,
            StringSerializer.get(), StringSerializer.get());
    // Schema: create column family locks with comparator=UTF8Type;
    public ColumnFamily<String, String> CF_LOCKS = new ColumnFamily<String, String>(CF_NAME_LOCKS,
            StringSerializer.get(), StringSerializer.get());

    @Inject
    public InstanceDataDAOCassandra(CommonConfig commonConfig, CassCommonConfig cassCommonConfig, HostSupplier hostSupplier) throws ConnectionException {
        this.cassCommonConfig = cassCommonConfig;
        this.commonConfig = commonConfig;

        BOOT_CLUSTER = cassCommonConfig.getCassandraClusterName();
        if (BOOT_CLUSTER == null || BOOT_CLUSTER.isEmpty())
            throw new RuntimeException(
                    "Cassandra cluster name cannot be blank. Please use getCassandraClusterName() property.");

        KS_NAME = cassCommonConfig.getCassandraKeyspaceName();
        if (KS_NAME == null || KS_NAME.isEmpty())
            throw new RuntimeException(
                    "Cassandra Keyspace can not be blank. Please use getCassandraKeyspaceName() property.");

        thriftPortForAstyanax = cassCommonConfig.getCassandraThriftPort();
        if (thriftPortForAstyanax <= 0)
            throw new RuntimeException(
                    "Thrift Port for Astyanax can not be blank. Please use getCassandraThriftPort() property.");

        this.hostSupplier = hostSupplier;

        if (cassCommonConfig.isEurekaHostsSupplierEnabled())
            ctx = initWithThriftDriverWithEurekaHostsSupplier();
        else
            ctx = initWithThriftDriverWithExternalHostsSupplier();

        ctx.start();
        bootKeyspace = ctx.getClient();
    }

    /** @return true when the cached registry is older than the configured refresh interval. */
    private boolean isCassandraCacheExpired() {
        return lastTimeCassandraPull + cassCommonConfig.getTokenRefreshInterval() <= System.currentTimeMillis();
    }

    /**
     * Register a new instance entry under a distributed lock. A pre-existing
     * entry with the same app/rack/id is left untouched. Persistence failures
     * are logged and swallowed (best-effort, preserved from the original
     * behavior); the lock is always released.
     */
    public void createInstanceEntry(AppsInstance instance) throws Exception {
        logger.info("*** Creating New Instance Entry ***");
        String key = getRowKey(instance);
        // If the key exists, keep the existing entry.
        if (getInstance(instance.getApp(), instance.getRack(), instance.getId()) != null) {
            logger.info(String.format("Key already exists: %s", key));
            return;
        }

        getLock(instance);

        try {
            MutationBatch m = bootKeyspace.prepareMutationBatch();
            ColumnListMutation<String> clm = m.withRow(CF_TOKENS, key);
            clm.putColumn(CN_ID, Integer.toString(instance.getId()), null);
            clm.putColumn(CN_APPID, instance.getApp(), null);
            clm.putColumn(CN_AZ, instance.getZone(), null);
            clm.putColumn(CN_DC, commonConfig.getRack(), null);
            clm.putColumn(CN_INSTANCEID, instance.getInstanceId(), null);
            clm.putColumn(CN_HOSTNAME, instance.getHostName(), null);
            clm.putColumn(CN_DYNOMITE_PORT, Integer.toString(instance.getDynomitePort()), null);
            clm.putColumn(CN_DYNOMITE_SECURE_PORT, Integer.toString(instance.getDynomiteSecurePort()), null);
            clm.putColumn(CN_DYNOMITE_SECURE_STORAGE_PORT, Integer.toString(instance.getDynomiteSecureStoragePort()), null);
            clm.putColumn(CN_PEER_PORT, Integer.toString(instance.getPeerPort()), null);
            clm.putColumn(CN_EIP, instance.getHostIP(), null);
            clm.putColumn(CN_TOKEN, instance.getToken(), null);
            clm.putColumn(CN_LOCATION, instance.getDatacenter(), null);
            clm.putColumn(CN_UPDATETIME, TimeUUIDUtils.getUniqueTimeUUIDinMicros(), null);
            Map<String, Object> volumes = instance.getVolumes();
            if (volumes != null) {
                for (String path : volumes.keySet()) {
                    clm.putColumn(CN_VOLUME_PREFIX + "_" + path, volumes.get(path).toString(), null);
                }
            }
            m.execute();
        } catch (Exception e) {
            // Best-effort write: log with full stack trace but do not propagate.
            logger.error("Failed to write instance entry " + key, e);
        } finally {
            releaseLock(instance);
        }
    }

    /*
     * To get a lock on the row - Create a choosing row and make sure there are
     * no contenders. If there are bail out. Also delete the column when bailing
     * out. - Once there are no contenders, grab the lock if it is not already
     * taken.
     */
    private void getLock(AppsInstance instance) throws Exception {

        String choosingkey = getChoosingKey(instance);
        MutationBatch m = bootKeyspace.prepareMutationBatch();
        ColumnListMutation<String> clm = m.withRow(CF_LOCKS, choosingkey);

        // Announce candidacy; the column expires in 6 sec so crashed
        // contenders cannot wedge the election.
        clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), Integer.valueOf(6));
        m.execute();
        int count = bootKeyspace.prepareQuery(CF_LOCKS).getKey(choosingkey).getCount().execute().getResult();
        if (count > 1) {
            // Contended election: withdraw our entry and bail out.
            m.withRow(CF_LOCKS, choosingkey).deleteColumn(instance.getInstanceId());
            m.execute();
            throw new Exception(String.format("More than 1 contender for lock %s %d", choosingkey, count));
        }

        String lockKey = getLockingKey(instance);
        OperationResult<ColumnList<String>> result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
        if (result.getResult().size() > 0
                && !result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId()))
            throw new Exception(String.format("Lock already taken %s", lockKey));

        // Take the lock with a 600 sec TTL, then re-read after a short pause
        // to confirm we are the sole holder.
        clm = m.withRow(CF_LOCKS, lockKey);
        clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), Integer.valueOf(600));
        m.execute();
        Thread.sleep(100);
        result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
        if (result.getResult().size() == 1
                && result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
            logger.info("Got lock {}", lockKey);
            return;
        } else
            throw new Exception(String.format("Cannot insert lock %s", lockKey));

    }

    /** Withdraw this instance's column from the choosing row. */
    private void releaseLock(AppsInstance instance) throws Exception {
        String choosingkey = getChoosingKey(instance);
        MutationBatch m = bootKeyspace.prepareMutationBatch();
        m.withRow(CF_LOCKS, choosingkey).deleteColumn(instance.getInstanceId());
        m.execute();
    }

    /**
     * Delete the instance's token row and its lock/choosing rows. Acquires the
     * distributed lock first; deleting the lock row at the end doubles as the
     * release.
     */
    public void deleteInstanceEntry(AppsInstance instance) throws Exception {
        // Acquire the lock first
        getLock(instance);

        // Delete the row
        String key = findKey(instance.getApp(), String.valueOf(instance.getId()), instance.getDatacenter(),
                instance.getRack());
        if (key == null)
            return; // don't fail it

        MutationBatch m = bootKeyspace.prepareMutationBatch();
        m.withRow(CF_TOKENS, key).delete();
        m.execute();

        key = getLockingKey(instance);
        // Delete key
        m = bootKeyspace.prepareMutationBatch();
        m.withRow(CF_LOCKS, key).delete();
        m.execute();

        // Have to delete choosing key as well to avoid issues with delete
        // followed by immediate writes
        key = getChoosingKey(instance);
        m = bootKeyspace.prepareMutationBatch();
        m.withRow(CF_LOCKS, key).delete();
        m.execute();
    }

    /** @return the node of {@code app} with the given rack and id, or null. */
    public AppsInstance getInstance(String app, String rack, int id) {
        Set<AppsInstance> set = getAllInstances(app);
        for (AppsInstance ins : set) {
            if (ins.getId() == id && ins.getRack().equals(rack))
                return ins;
        }
        return null;
    }

    /** @return the subset of {@code app}'s nodes whose datacenter equals {@code region}. */
    public Set<AppsInstance> getLocalDCInstances(String app, String region) {
        Set<AppsInstance> set = getAllInstances(app);
        Set<AppsInstance> returnSet = new HashSet<AppsInstance>();
        for (AppsInstance ins : set) {
            if (ins.getDatacenter().equals(region))
                returnSet.add(ins);
        }
        return returnSet;
    }

    /** Read all of {@code app}'s token rows directly from Cassandra (no cache). */
    public Set<AppsInstance> getAllInstancesFromCassandra(String app) {
        Set<AppsInstance> set = new HashSet<AppsInstance>();
        try {

            final String selectClause = String.format(
                    "SELECT * FROM %s USING CONSISTENCY LOCAL_QUORUM WHERE %s = '%s' ", CF_NAME_TOKENS, CN_APPID, app);
            logger.debug(selectClause);

            final ColumnFamily<String, String> CF_TOKENS_NEW = ColumnFamily.newColumnFamily(KS_NAME,
                    StringSerializer.get(), StringSerializer.get());

            OperationResult<CqlResult<String, String>> result = bootKeyspace.prepareQuery(CF_TOKENS_NEW)
                    .withCql(selectClause).execute();

            for (Row<String, String> row : result.getResult().getRows())
                set.add(transform(row.getColumns()));
        } catch (Exception e) {
            logger.warn("Caught an Unknown Exception during reading msgs", e);
            throw new RuntimeException(e);
        }
        return set;
    }

    /**
     * Return the (cached) set of {@code app}'s instances, refreshing from
     * Cassandra when the cache has expired or is empty.
     *
     * NOTE(review): the cache is keyed per-process, not per-app — callers that
     * pass different {@code app} values share one cache. Confirm all callers
     * use a single cluster name.
     */
    public Set<AppsInstance> getAllInstances(String app) {
        if (isCassandraCacheExpired() || appInstances.isEmpty()) {
            write.lock();
            try {
                // Double-check under the write lock: another thread may have
                // refreshed the cache while we waited.
                if (isCassandraCacheExpired() || appInstances.isEmpty()) {
                    // SLF4J uses {} placeholders (the original "%d" was never substituted).
                    logger.debug("lastpull {} msecs ago, getting instances from C*",
                            System.currentTimeMillis() - lastTimeCassandraPull);
                    appInstances = getAllInstancesFromCassandra(app);
                    lastTimeCassandraPull = System.currentTimeMillis();
                }
            } finally {
                // Always release, even if the Cassandra pull throws.
                write.unlock();
            }
        }
        read.lock();
        try {
            return appInstances;
        } finally {
            read.unlock();
        }
    }

    /** @return the row key of the matching token row, or null when absent. */
    public String findKey(String app, String id, String location, String datacenter) {
        try {
            final String selectClause = String.format(
                    "SELECT * FROM %s USING CONSISTENCY LOCAL_QUORUM WHERE %s = '%s' and %s = '%s' and %s = '%s' and %s = '%s' ",
                    "tokens", CN_APPID, app, CN_ID, id, CN_LOCATION, location, CN_DC, datacenter);
            logger.info(selectClause);

            final ColumnFamily<String, String> CF_INSTANCES_NEW = ColumnFamily.newColumnFamily(KS_NAME,
                    StringSerializer.get(), StringSerializer.get());

            OperationResult<CqlResult<String, String>> result = bootKeyspace.prepareQuery(CF_INSTANCES_NEW)
                    .withCql(selectClause).execute();

            if (result == null || result.getResult().getRows().size() == 0)
                return null;

            Row<String, String> row = result.getResult().getRows().getRowByIndex(0);
            return row.getKey();

        } catch (Exception e) {
            logger.warn("Caught an Unknown Exception during find a row matching cluster[" + app + "], id[" + id
                    + "], and region[" + datacenter + "]", e);
            throw new RuntimeException(e);
        }

    }

    /** Convert a token row's columns into an AppsInstance, defaulting missing ports from config. */
    private AppsInstance transform(ColumnList<String> columns) {
        AppsInstance ins = new AppsInstance();
        Map<String, String> cmap = new HashMap<String, String>();
        for (Column<String> column : columns) {
            cmap.put(column.getName(), column.getStringValue());
            // The appId column's write timestamp doubles as the row's update time.
            if (column.getName().equals(CN_APPID))
                ins.setUpdatetime(column.getTimestamp());
        }

        ins.setApp(cmap.get(CN_APPID));
        ins.setZone(cmap.get(CN_AZ));
        ins.setHost(cmap.get(CN_HOSTNAME));
        ins.setDynomitePort((cmap.get(CN_DYNOMITE_PORT) != null) ? Integer.parseInt(cmap.get(CN_DYNOMITE_PORT)) : commonConfig.getDynomitePort());
        ins.setDynomiteSecurePort((cmap.get(CN_DYNOMITE_SECURE_PORT) != null) ? Integer.parseInt(cmap.get(CN_DYNOMITE_SECURE_PORT)) : commonConfig.getDynomiteSecurePort());
        ins.setDynomiteSecureStoragePort((cmap.get(CN_DYNOMITE_SECURE_STORAGE_PORT) != null) ? Integer.parseInt(cmap.get(CN_DYNOMITE_SECURE_STORAGE_PORT)) : commonConfig.getDynomiteSecureStoragePort());
        ins.setPeerPort((cmap.get(CN_PEER_PORT) != null) ? Integer.parseInt(cmap.get(CN_PEER_PORT)) : commonConfig.getDynomitePeerPort());
        ins.setHostIP(cmap.get(CN_EIP));
        ins.setId(Integer.parseInt(cmap.get(CN_ID)));
        ins.setInstanceId(cmap.get(CN_INSTANCEID));
        ins.setDatacenter(cmap.get(CN_LOCATION));
        ins.setRack(cmap.get(CN_DC));
        ins.setToken(cmap.get(CN_TOKEN));
        return ins;
    }

    private String getChoosingKey(AppsInstance instance) {
        return instance.getApp() + "_" + instance.getRack() + "_" + instance.getId() + "-choosing";
    }

    private String getLockingKey(AppsInstance instance) {
        return instance.getApp() + "_" + instance.getRack() + "_" + instance.getId() + "-lock";
    }

    private String getRowKey(AppsInstance instance) {
        return instance.getApp() + "_" + instance.getRack() + "_" + instance.getId();
    }

    /** Astyanax context whose hosts are discovered via the injected (Eureka) HostSupplier. */
    private AstyanaxContext<Keyspace> initWithThriftDriverWithEurekaHostsSupplier() {
        logger.info("BOOT_CLUSTER = {}, KS_NAME = {}", BOOT_CLUSTER, KS_NAME);
        return new AstyanaxContext.Builder().forCluster(BOOT_CLUSTER).forKeyspace(KS_NAME)
                .withAstyanaxConfiguration(
                        new AstyanaxConfigurationImpl().setDiscoveryType(NodeDiscoveryType.DISCOVERY_SERVICE))
                .withConnectionPoolConfiguration(new ConnectionPoolConfigurationImpl("MyConnectionPool")
                        .setMaxConnsPerHost(3).setPort(thriftPortForAstyanax))
                .withHostSupplier(hostSupplier.getSupplier(BOOT_CLUSTER))
                .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
                .buildKeyspace(ThriftFamilyFactory.getInstance());

    }

    /** Astyanax context whose hosts come from the static getCassandraSeeds() list. */
    private AstyanaxContext<Keyspace> initWithThriftDriverWithExternalHostsSupplier() {
        logger.info("BOOT_CLUSTER = {}, KS_NAME = {}", BOOT_CLUSTER, KS_NAME);
        return new AstyanaxContext.Builder().forCluster(BOOT_CLUSTER).forKeyspace(KS_NAME)
                .withAstyanaxConfiguration(
                        new AstyanaxConfigurationImpl().setDiscoveryType(NodeDiscoveryType.DISCOVERY_SERVICE)
                                .setConnectionPoolType(ConnectionPoolType.ROUND_ROBIN))
                .withConnectionPoolConfiguration(new ConnectionPoolConfigurationImpl("MyConnectionPool")
                        .setMaxConnsPerHost(3).setPort(thriftPortForAstyanax))
                .withHostSupplier(getSupplier()).withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
                .buildKeyspace(ThriftFamilyFactory.getInstance());

    }

    /** Build a host supplier from the comma-separated getCassandraSeeds() config. */
    private Supplier<List<Host>> getSupplier() {

        return new Supplier<List<Host>>() {

            @Override
            public List<Host> get() {

                List<Host> hosts = new ArrayList<Host>();

                List<String> cassHostnames = new ArrayList<String>(
                        Arrays.asList(StringUtils.split(cassCommonConfig.getCassandraSeeds(), ",")));

                if (cassHostnames.size() == 0)
                    throw new RuntimeException(
                            "Cassandra Host Names can not be blank. At least one host is needed. Please use getCassandraSeeds() property.");

                for (String cassHost : cassHostnames) {
                    logger.info("Adding Cassandra Host = {}", cassHost);
                    hosts.add(new Host(cassHost, thriftPortForAstyanax));
                }

                return hosts;
            }
        };
    }
}
| 3,166 |
0 | Create_ds/dynomite-manager/dynomitemanager-web/src/smokeTest/java/com/netflix | Create_ds/dynomite-manager/dynomitemanager-web/src/smokeTest/java/com/netflix/florida/SmokeTest.java | package com.netflix.florida;
import static io.restassured.RestAssured.*;
import static org.hamcrest.Matchers.*;
import java.io.IOException;
import javax.inject.Named;
import org.junit.Test;
import org.junit.runner.RunWith;
import com.google.inject.Inject;
import com.netflix.archaius.test.TestPropertyOverride;
import com.netflix.governator.guice.jetty.Archaius2JettyModule;
import com.netflix.governator.guice.test.ModulesForTesting;
import com.netflix.governator.guice.test.junit4.GovernatorJunit4ClassRunner;
import com.netflix.florida.startup.FloridaModule;
/**
* This is the one and only one integration test for the whole service.
* We leverage the governator-test-junit library to run the test for us.
*
* We don't do any deep testing here. Our unit tests are supposed to do that. We keep this test simple and we check that
* everything is wired well together and that all of our endpoints are up. Testing the actual content returned
* or that our POSTs work etc. is done in the unit tests. Our unit test do not reach out to the network so it's faster
* to test all possible input/output scenarios there and gain confidence that our business logic works.
*
* @author This file is auto-generated by runtime@netflix.com. Feel free to modify.
*/
public class SmokeTest {

    @Test
    public void testRestEndpoint() throws IOException {
        // Intentionally empty: the smoke check is currently only that the test
        // class compiles and the JUnit run completes.
        // TODO(review): issue a real request against a known endpoint once the
        // governator/Jetty test harness (imports above) is wired up.
    }
}
| 3,167 |
0 | Create_ds/dynomite-manager/dynomitemanager-web/src/main/java/com/netflix/florida | Create_ds/dynomite-manager/dynomitemanager-web/src/main/java/com/netflix/florida/startup/FloridaModule.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.florida.startup;
import com.google.inject.AbstractModule;
// Common module dependencies
import com.google.inject.Provides;
import com.google.inject.name.Names;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.dynomitemanager.FloridaServer;
import com.netflix.dynomitemanager.aws.S3Backup;
import com.netflix.dynomitemanager.aws.S3Restore;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.dynomitemanager.dualAccount.AwsRoleAssumptionCredential;
import com.netflix.dynomitemanager.dynomite.DynomiteProcessManager;
import com.netflix.dynomitemanager.dynomite.DynomiteStandardTuner;
import com.netflix.dynomitemanager.dynomite.IDynomiteProcess;
import com.netflix.dynomitemanager.monitoring.JedisFactory;
import com.netflix.dynomitemanager.monitoring.SimpleJedisFactory;
import com.netflix.dynomitemanager.storage.RedisStorageProxy;
import com.netflix.dynomitemanager.storage.StorageProxy;
import javax.inject.Singleton;
import org.quartz.SchedulerFactory;
import org.quartz.impl.StdSchedulerFactory;
import com.netflix.nfsidecar.aws.AWSMembership;
import com.netflix.nfsidecar.aws.AwsInstanceEnvIdentity;
import com.netflix.nfsidecar.aws.IAMCredential;
import com.netflix.nfsidecar.aws.ICredential;
import com.netflix.nfsidecar.backup.Backup;
import com.netflix.nfsidecar.backup.Restore;
import com.netflix.nfsidecar.config.AWSCommonConfig;
import com.netflix.nfsidecar.config.CassCommonConfig;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.identity.IInstanceState;
import com.netflix.nfsidecar.identity.IMembership;
import com.netflix.nfsidecar.identity.InstanceEnvIdentity;
import com.netflix.nfsidecar.instance.InstanceDataRetriever;
import com.netflix.nfsidecar.instance.LocalInstanceDataRetriever;
import com.netflix.nfsidecar.instance.VpcInstanceDataRetriever;
import com.netflix.nfsidecar.resources.env.IEnvVariables;
import com.netflix.nfsidecar.resources.env.InstanceEnvVariables;
import com.netflix.nfsidecar.supplier.HostSupplier;
import com.netflix.nfsidecar.supplier.LocalHostSupplier;
import com.netflix.nfsidecar.tokensdb.CassandraInstanceFactory;
import com.netflix.nfsidecar.tokensdb.IAppsInstanceFactory;
import com.netflix.nfsidecar.utils.ProcessTuner;
import com.netflix.runtime.health.guice.HealthModule;
/**
* This is the "main" module where we wire everything up. If you see this module
* getting overly complex, it's a good idea to break things off into separate
* ones and install them here instead.
*
*/
/**
 * Main Guice module for the Florida (Dynomite Manager) web application.
 *
 * <p>Wires health indicators, the Jersey servlet stack, Archaius configuration,
 * and all concrete implementations behind the sidecar's service interfaces.
 * NOTE(review): binding order below is kept as-is on purpose; several bindings
 * (e.g. the annotated {@code ICredential}) coexist with an unannotated one.
 */
public final class FloridaModule extends AbstractModule {

    @Override
    protected void configure() {
        // Register extra health indicators so the health endpoint reflects both
        // the dynomite process and the backing Redis storage proxy.
        install(new HealthModule() {
            @Override
            protected void configureHealth() {
                bindAdditionalHealthIndicator().to(DynomiteProcessManager.class);
                bindAdditionalHealthIndicator().to(RedisStorageProxy.class);
            }
        });
        install(new JerseyModule());
        install(new ArchaiusModule());
        // Swagger wiring kept for reference; currently disabled.
        /*
        install(new SwaggerServletModule());
        install(new JaxrsSwaggerModule());
        install(new GuiceServletSwaggerModule());
        */
        // Core service wiring. FloridaServer is eager so the server boots with the injector.
        bind(FloridaServer.class).asEagerSingleton();
        bind(ProcessTuner.class).to(DynomiteStandardTuner.class);
        bind(SchedulerFactory.class).to(StdSchedulerFactory.class).asEagerSingleton();
        bind(IDynomiteProcess.class).to(DynomiteProcessManager.class);
        bind(StorageProxy.class).to(RedisStorageProxy.class);
        bind(IInstanceState.class).to(InstanceState.class);
        bind(JedisFactory.class).to(SimpleJedisFactory.class);
        bind(IEnvVariables.class).to(InstanceEnvVariables.class);
        /* AWS binding */
        //bind(InstanceDataRetriever.class).to(VpcInstanceDataRetriever.class);
        bind(IMembership.class).to(AWSMembership.class);
        bind(ICredential.class).to(IAMCredential.class);
        // Separate, named credential used when assuming a role in a second AWS account.
        bind(ICredential.class).annotatedWith(Names.named("awsroleassumption")).to(AwsRoleAssumptionCredential.class);
        bind(InstanceEnvIdentity.class).to(AwsInstanceEnvIdentity.class);
        bind(Backup.class).to(S3Backup.class);
        bind(Restore.class).to(S3Restore.class);
        /* Local */
        bind(InstanceDataRetriever.class).to(LocalInstanceDataRetriever.class);
        /* Netflix */
        bind(IAppsInstanceFactory.class).to(CassandraInstanceFactory.class);
        bind(HostSupplier.class).to(LocalHostSupplier.class);
        // Alternative host suppliers, kept for reference:
        // bind(HostSupplier.class).to(CassandraLocalHostsSupplier.class);
        // bind(HostSupplier.class).to(EurekaHostsSupplier.class);
    }

    /** Archaius-backed proxy for the common sidecar configuration. */
    @Provides
    @Singleton
    CommonConfig getCommonConfig(ConfigProxyFactory factory) {
        return factory.newProxy(CommonConfig.class);
    }

    /** Archaius-backed proxy for the Cassandra-related common configuration. */
    @Provides
    @Singleton
    CassCommonConfig getCassCommonConfig(ConfigProxyFactory factory) {
        return factory.newProxy(CassCommonConfig.class);
    }

    /** Archaius-backed proxy for the AWS-related common configuration. */
    @Provides
    @Singleton
    AWSCommonConfig getAWSCommonConfig(ConfigProxyFactory factory) {
        return factory.newProxy(AWSCommonConfig.class);
    }

    /** Archaius-backed proxy for the Florida-specific configuration. */
    @Provides
    @Singleton
    FloridaConfig getFloridaConfig(ConfigProxyFactory factory) {
        return factory.newProxy(FloridaConfig.class);
    }
}
| 3,168 |
0 | Create_ds/dynomite-manager/dynomitemanager-web/src/main/java/com/netflix/florida | Create_ds/dynomite-manager/dynomitemanager-web/src/main/java/com/netflix/florida/startup/Florida.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.florida.startup;
import com.google.inject.Injector;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.governator.InjectorBuilder;
import com.netflix.governator.guice.jetty.Archaius2JettyModule;
import com.netflix.governator.guice.servlet.WebApplicationInitializer;
/**
* The "main" class that boots up the service. When it's deployed within a servlet container such
* as Tomcat, only the createInjector() is called. For local testing one simply invokes the
* main() method as if running a normal Java app.
*
* @author This file is auto-generated by runtime@netflix.com. Feel free to modify.
*/
public class Florida implements WebApplicationInitializer {

    /**
     * Standalone entry point for local development: boots an embedded Jetty via
     * governator and blocks until the injector terminates. The "laptop"
     * configuration resource is layered on top of the application defaults.
     */
    public static void main(String[] args) throws Exception {
        final ArchaiusModule laptopOverrides = new ArchaiusModule() {
            @Override
            protected void configureArchaius() {
                bindApplicationConfigurationOverrideResource("laptop");
            }
        };
        InjectorBuilder
                .fromModules(new FloridaModule(), new Archaius2JettyModule(), laptopOverrides)
                .createInjector()
                .awaitTermination();
    }

    /**
     * Servlet-container entry point (e.g. Tomcat): only the application module
     * is installed; the container supplies the HTTP stack.
     */
    @Override
    public Injector createInjector() {
        return InjectorBuilder.fromModules(new FloridaModule()).createInjector();
    }
}
| 3,169 |
0 | Create_ds/dynomite-manager/dynomitemanager-web/src/main/java/com/netflix/florida | Create_ds/dynomite-manager/dynomitemanager-web/src/main/java/com/netflix/florida/startup/JerseyModule.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.florida.startup;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Provides;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.guice.JerseyServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
/**
*
* @author Viren
*
*/
/**
 * Guice servlet module wiring the Jersey (JAX-RS 1.x) resources under
 * {@code /api/*} and applying a permissive CORS filter to every request.
 */
public final class JerseyModule extends JerseyServletModule {

    @Override
    protected void configureServlets() {
        // CORS filter runs on every request before Jersey dispatches it.
        filter("/*").through(apiOriginFilter());

        Map<String, String> jerseyParams = new HashMap<>();
        // Fall through to the container's default handling on 404 so static
        // content (webjars, swagger UI, etc.) can still be served.
        jerseyParams.put("com.sun.jersey.config.feature.FilterForwardOn404", "true");
        jerseyParams.put("com.sun.jersey.config.property.WebPageContentRegex",
                "/(((webjars|api-docs|swagger-ui/docs|manage)/.*)|(favicon\\.ico))");
        // Packages scanned for JAX-RS resources and swagger providers.
        jerseyParams.put(PackagesResourceConfig.PROPERTY_PACKAGES,
                "com.netflix.dynomitemanager.resources;io.swagger.jaxrs.json;io.swagger.jaxrs.listing");
        jerseyParams.put(ResourceConfig.FEATURE_DISABLE_WADL, "false");
        serve("/api/*").with(GuiceContainer.class, jerseyParams);
    }

    /**
     * Shared Jackson mapper: lenient on unknown/ignored/null-for-primitive
     * input, and omits empty values on output.
     */
    @Provides
    @Singleton
    public ObjectMapper objectMapper() {
        final ObjectMapper om = new ObjectMapper();
        om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        om.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false);
        om.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false);
        // NON_EMPTY subsumes NON_NULL (nulls AND empty values are skipped), so a
        // single inclusion setting suffices. The previous extra NON_NULL call was
        // immediately overwritten by this one and has been removed.
        om.setSerializationInclusion(Include.NON_EMPTY);
        return om;
    }

    /** Filter that stamps permissive CORS headers onto every response. */
    @Provides
    @Singleton
    public Filter apiOriginFilter() {
        return new Filter() {
            @Override
            public void init(FilterConfig filterConfig) throws ServletException {
            }

            @Override
            public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
                    throws IOException, ServletException {
                HttpServletResponse res = (HttpServletResponse) response;
                // Only set the origin header if something upstream hasn't already.
                if (!res.containsHeader("Access-Control-Allow-Origin")) {
                    res.setHeader("Access-Control-Allow-Origin", "*");
                }
                res.addHeader("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT");
                res.addHeader("Access-Control-Allow-Headers", "Content-Type, api_key, Authorization");
                chain.doFilter(request, response);
            }

            @Override
            public void destroy() {
            }
        };
    }

    // equals/hashCode on the class identity so Guice de-duplicates repeated installs.
    @Override
    public boolean equals(Object obj) {
        return obj != null && getClass().equals(obj.getClass());
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
}
| 3,170 |
0 | Create_ds/collapstring/ext | Create_ds/collapstring/ext/collapstring/CollapstringService.java | import org.jruby.Ruby;
import org.jruby.RubyModule;
import org.jruby.RubyString;
import org.jruby.anno.JRubyMethod;
import org.jruby.anno.JRubyModule;
import org.jruby.runtime.builtin.IRubyObject;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.load.BasicLibraryService;
import org.jruby.util.ByteList;
import java.io.IOException;
import java.util.Random;
@JRubyModule(name = "Collapstring")
public class CollapstringService implements BasicLibraryService {

    /** Registers the {@code Collapstring} module and its annotated methods with the runtime. */
    @Override
    public boolean basicLoad(Ruby ruby) throws IOException {
        final RubyModule module = ruby.defineModule("Collapstring");
        module.defineAnnotatedMethods(CollapstringService.class);
        return true;
    }

    /** Collapses quoted regions of {@code src} in place and returns {@code src}. */
    @JRubyMethod(name = "collapse!", required = 1, module = true)
    public static IRubyObject collapse_bang(IRubyObject self, IRubyObject src) {
        final RubyString srcString = src.convertToString();
        srcString.modify(); // unshare the backing buffer before mutating it
        collapseBytes(srcString.getByteList());
        return src;
    }

    /** Non-destructive variant: collapses a duplicate, leaving {@code src} intact. */
    @JRubyMethod(name = "collapse", required = 1, module = true)
    public static IRubyObject collapse(IRubyObject self, IRubyObject src) {
        final IRubyObject res = src.dup();
        collapse_bang(self, res);
        return res;
    }

    /** Builds a random string of quotes, backslashes and spaces for fuzz testing. */
    @JRubyMethod(name = "fuzz", required = 1, module = true)
    public static IRubyObject fuzz(IRubyObject self, IRubyObject length) {
        final Random rnd = new Random();
        final long llen = length.convertToInteger().getLongValue();
        final int len = (int) llen;
        final byte[] bytes = new byte[len];
        for (int i = 0; i < len; i++) {
            switch (rnd.nextInt(4)) {
                case 0:
                    bytes[i] = '\'';
                    break;
                case 1:
                    bytes[i] = '\"';
                    break;
                case 2:
                    bytes[i] = '\\';
                    break;
                case 3:
                    bytes[i] = ' ';
                    break;
            }
        }
        return RubyString.newString(self.getRuntime(), bytes);
    }

    /**
     * In-place state machine that removes the contents of single- and
     * double-quoted regions, keeping the delimiting quote characters, and
     * honouring backslash escapes both inside and outside quotes. Bytes are
     * compacted towards the front ({@code dst_idx}) and the list is truncated
     * to the compacted length at the end.
     */
    private static void collapseBytes(ByteList bytes) {
        final int len = bytes.length();
        State state = State.OUT;
        // Hoisted out of the loop so the final compacted length is visible after it.
        int dst_idx = 0;
        for (int src_idx = 0; src_idx < len; src_idx++) {
            final int cur = bytes.get(src_idx);
            switch (cur) {
                case '\"':
                    switch (state) {
                        case OUT:
                            bytes.set(dst_idx++, cur);
                            state = State.IN_DOUBLE;
                            break;
                        case OUT_BACKSLASH:
                        case IN_DOUBLE:
                            bytes.set(dst_idx++, cur);
                            state = State.OUT;
                            break;
                        case IN_DOUBLE_BACKSLASH:
                            state = State.IN_DOUBLE;
                            /* fallthrough */
                        case IN_SINGLE:
                            break;
                        case IN_SINGLE_BACKSLASH:
                            state = State.IN_SINGLE;
                            break;
                    }
                    break;
                case '\'':
                    switch (state) {
                        case OUT:
                            bytes.set(dst_idx++, cur);
                            state = State.IN_SINGLE;
                            break;
                        case OUT_BACKSLASH:
                        case IN_SINGLE:
                            bytes.set(dst_idx++, cur);
                            state = State.OUT;
                            break;
                        case IN_SINGLE_BACKSLASH:
                            state = State.IN_SINGLE;
                            break;
                        case IN_DOUBLE_BACKSLASH:
                            state = State.IN_DOUBLE;
                            /* fallthrough */
                        case IN_DOUBLE:
                            break;
                    }
                    break;
                case '\\':
                    switch (state) {
                        case IN_SINGLE:
                            state = State.IN_SINGLE_BACKSLASH;
                            break;
                        case IN_SINGLE_BACKSLASH:
                            state = State.IN_SINGLE;
                            break;
                        case IN_DOUBLE:
                            state = State.IN_DOUBLE_BACKSLASH;
                            break;
                        case IN_DOUBLE_BACKSLASH:
                            state = State.IN_DOUBLE;
                            break;
                        case OUT:
                            bytes.set(dst_idx++, cur);
                            state = State.OUT_BACKSLASH;
                            break;
                        case OUT_BACKSLASH:
                            bytes.set(dst_idx++, cur);
                            state = State.OUT;
                            break;
                    }
                    break;
                default:
                    switch (state) {
                        case IN_SINGLE_BACKSLASH:
                            state = State.IN_SINGLE;
                            break;
                        case IN_DOUBLE_BACKSLASH:
                            state = State.IN_DOUBLE;
                            break;
                        case OUT:
                            bytes.set(dst_idx++, cur);
                            break;
                        case OUT_BACKSLASH:
                            bytes.set(dst_idx++, cur);
                            state = State.OUT;
                            break;
                        case IN_SINGLE:
                        case IN_DOUBLE:
                            break;
                    }
            }
        }
        // BUGFIX: truncate exactly once, after the whole scan. The original code
        // called setRealSize(dst_idx) inside the loop body, shrinking the list
        // while it was still being read; as soon as dst_idx fell behind src_idx
        // (i.e. anything was collapsed), the next bytes.get(src_idx) indexed past
        // the newly reduced real size.
        bytes.setRealSize(dst_idx);
    }

    /** Scanner states: outside quotes, inside single/double quotes, each with a pending-backslash variant. */
    private static enum State {
        OUT,
        OUT_BACKSLASH,
        IN_SINGLE,
        IN_SINGLE_BACKSLASH,
        IN_DOUBLE,
        IN_DOUBLE_BACKSLASH
    }
}
| 3,171 |
0 | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-eureka-plugin/src/main/java/netflix/adminresources | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-eureka-plugin/src/main/java/netflix/adminresources/resources/EurekaResource.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package netflix.adminresources.resources;
import com.google.common.annotations.Beta;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.ArrayList;
import java.util.List;
/**
* @author pkamath
* @author Nitesh Kant
*/
@Beta
@Path("/eureka")
@Produces(MediaType.APPLICATION_JSON)
public class EurekaResource {

    private static final Logger logger = LoggerFactory.getLogger(EurekaResource.class);

    /**
     * Lists every instance currently registered in Eureka as a JSON payload.
     * Returns an empty instance list when no discovery client is available.
     */
    @GET
    public Response getEurekaDetails() {
        final List<EurekaInstanceInfo> instances = new ArrayList<EurekaInstanceInfo>();
        final DiscoveryClient discoveryClient = DiscoveryManager.getInstance().getDiscoveryClient();
        if (null != discoveryClient) {
            final Applications registry = discoveryClient.getApplications();
            for (Application application : registry.getRegisteredApplications()) {
                for (InstanceInfo instance : application.getInstances()) {
                    instances.add(new EurekaInstanceInfo(
                            instance.getAppName(),
                            instance.getId(),
                            instance.getStatus().name(),
                            instance.getIPAddr(),
                            instance.getHostName()));
                }
            }
        }
        final Gson gson = new GsonBuilder().serializeNulls().create();
        return Response.ok(gson.toJson(new KaryonAdminResponse(instances))).build();
    }

    /** DTO serialized by Gson; the field names define the JSON keys, so keep them stable. */
    private static class EurekaInstanceInfo {
        private String application;
        private String id;
        private String status;
        private String ipAddress;
        private String hostName;

        private EurekaInstanceInfo(String application, String id, String status, String ipAddress, String hostName) {
            this.application = application;
            this.id = id;
            this.status = status;
            this.ipAddress = ipAddress;
            this.hostName = hostName;
        }

        public String getApplication() {
            return application;
        }

        public String getId() {
            return id;
        }

        public String getStatus() {
            return status;
        }

        public String getIpAddress() {
            return ipAddress;
        }

        public String getHostName() {
            return hostName;
        }
    }
}
| 3,172 |
0 | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-eureka-plugin/src/main/java/netflix/adminresources | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-eureka-plugin/src/main/java/netflix/adminresources/pages/EurekaPage.java | package netflix.adminresources.pages;
import netflix.adminresources.AbstractAdminPageInfo;
import netflix.adminresources.AdminPage;
@AdminPage
public class EurekaPage extends AbstractAdminPageInfo {

    // Identifier and display name used by the admin console navigation.
    public static final String PAGE_ID = "eureka";
    public static final String NAME = "Eureka";

    /** Registers the Eureka admin page under its fixed id and display name. */
    public EurekaPage() {
        super(PAGE_ID, NAME);
    }
}
0 | Create_ds/karyon/karyon2-admin-plugins/karyon-admin-healthcheck-plugin/src/test/java/netflix | Create_ds/karyon/karyon2-admin-plugins/karyon-admin-healthcheck-plugin/src/test/java/netflix/adminresources/HealthCheckResourceTest.java | package netflix.adminresources;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.config.ConfigurationManager;
import com.netflix.karyon.server.eureka.HealthCheckInvocationStrategy;
import com.netflix.karyon.server.eureka.SyncHealthCheckInvocationStrategy;
import com.netflix.karyon.spi.HealthCheckHandler;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import javax.ws.rs.core.Response;
import static org.junit.Assert.assertEquals;
import netflix.admin.AdminConfigImpl;
public class HealthCheckResourceTest {

    // Assigned by checkHealth() so tearDown() can always release the container.
    private AdminResourcesContainer container;

    @After
    public void tearDown() throws Exception {
        final AbstractConfiguration configInst = ConfigurationManager.getConfigInstance();
        configInst.clearProperty(AdminConfigImpl.CONTAINER_LISTEN_PORT);
        if (container != null) {
            container.shutdown();
            container = null;
        }
    }

    @Before
    public void init() {
        // Port 0 = pick a free ephemeral port; resources mounted under /jr.
        System.setProperty(AdminConfigImpl.CONTAINER_LISTEN_PORT, "0");
        System.setProperty(AdminConfigImpl.NETFLIX_ADMIN_RESOURCE_CONTEXT, "/jr");
    }

    @Test
    public void goodHealth() throws Exception {
        checkHealth(goodHealthHandler(), Response.Status.OK.getStatusCode());
    }

    @Test
    public void badHealth() throws Exception {
        checkHealth(badHealthHandler(), Response.Status.INTERNAL_SERVER_ERROR.getStatusCode());
    }

    /**
     * Boots the admin container with the given handler and asserts the HTTP
     * status returned by the /v2/healthcheck resource.
     *
     * BUGFIX: the container is now kept on the {@link #container} field so that
     * {@link #tearDown()} shuts it down even when the assertion fails.
     * Previously the field was never assigned, so a failed assertion leaked the
     * running container (and its listen port) into subsequent tests.
     */
    private void checkHealth(HealthCheckHandler healthCheckHandler, int respStatus) throws Exception {
        container = buildAdminResourcesContainer(healthCheckHandler);
        container.init();
        final int adminPort = container.getServerPort();
        HttpClient client = new DefaultHttpClient();
        HttpGet healthGet =
                new HttpGet(String.format("http://localhost:%d/jr/v2/healthcheck", adminPort));
        HttpResponse response = client.execute(healthGet);
        assertEquals("admin resource health check resource failed.", respStatus,
                response.getStatusLine().getStatusCode());
    }

    /** Builds a container whose injector supplies the given handler synchronously. */
    private AdminResourcesContainer buildAdminResourcesContainer(final HealthCheckHandler healthCheckHandler) throws Exception {
        final Injector appInjector = Guice.createInjector(new AbstractModule() {
            @Override
            protected void configure() {
                bind(HealthCheckHandler.class).toInstance(healthCheckHandler);
                bind(HealthCheckInvocationStrategy.class).to(SyncHealthCheckInvocationStrategy.class);
            }
        });
        return appInjector.getInstance(AdminResourcesContainer.class);
    }

    /** Handler that always reports healthy (HTTP 200). */
    private HealthCheckHandler goodHealthHandler() {
        return new HealthCheckHandler() {
            @Override
            public int getStatus() {
                return Response.Status.OK.getStatusCode();
            }
        };
    }

    /** Handler that always reports unhealthy (HTTP 500). */
    private HealthCheckHandler badHealthHandler() {
        return new HealthCheckHandler() {
            @Override
            public int getStatus() {
                return Response.Status.INTERNAL_SERVER_ERROR.getStatusCode();
            }
        };
    }
}
| 3,174 |
0 | Create_ds/karyon/karyon2-admin-plugins/karyon-admin-healthcheck-plugin/src/main/java/netflix | Create_ds/karyon/karyon2-admin-plugins/karyon-admin-healthcheck-plugin/src/main/java/netflix/adminresources/HealthCheckPlugin.java | package netflix.adminresources;
@AdminPage
public class HealthCheckPlugin extends AbstractAdminPageInfo {

    // Identifier and display name registered with the admin console.
    public static final String PAGE_ID = "karyon_healthCheck";
    public static final String NAME = "HealthCheck";

    /** Registers the health-check plugin page under its fixed id and name. */
    public HealthCheckPlugin() {
        super(PAGE_ID, NAME);
    }

    /** Jersey package scanned for this plugin's REST resources. */
    @Override
    public String getJerseyResourcePackageList() {
        return "com.netflix.adminresources";
    }

    /** Hidden from the admin console navigation; only the REST endpoint is exposed. */
    @Override
    public boolean isVisible() {
        return false;
    }
}
| 3,175 |
0 | Create_ds/karyon/karyon2-admin-plugins/karyon-admin-healthcheck-plugin/src/main/java/com/netflix | Create_ds/karyon/karyon2-admin-plugins/karyon-admin-healthcheck-plugin/src/main/java/com/netflix/adminresources/HealthcheckResource.java | package com.netflix.adminresources;
import com.google.inject.Inject;
import com.netflix.karyon.server.eureka.HealthCheckInvocationStrategy;
import com.sun.jersey.spi.resource.Singleton;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;
import java.util.concurrent.TimeoutException;
@Path("/v2/healthcheck")
@Singleton
public class HealthcheckResource {

    // Optional injection: stays null when the application bound no strategy.
    @Inject(optional = true)
    private HealthCheckInvocationStrategy invocationStrategy;

    /**
     * Runs the configured health check and maps its result to an HTTP status:
     * 404 when no strategy is bound, 503 when the check times out, otherwise
     * whatever status the strategy reports.
     */
    @GET
    public Response doHealthCheck() {
        if (null == invocationStrategy) {
            return Response.status(Response.Status.NOT_FOUND).build();
        }
        try {
            final int healthStatus = invocationStrategy.invokeCheck();
            return Response.status(healthStatus).build();
        } catch (TimeoutException e) {
            return Response.status(Response.Status.SERVICE_UNAVAILABLE).build();
        }
    }
}
| 3,176 |
0 | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-healthcheck-plugin/src/test/java/netflix | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-healthcheck-plugin/src/test/java/netflix/adminresources/HealthCheckResourceTest.java | package netflix.adminresources;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.config.ConfigurationManager;
import netflix.admin.AdminConfigImpl;
import netflix.karyon.health.HealthCheckHandler;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import javax.ws.rs.core.Response;
import java.lang.reflect.Field;
import static org.junit.Assert.assertEquals;
public class HealthCheckResourceTest {

    // Assigned by checkHealth() so tearDown() can always release the container.
    private AdminResourcesContainer container;

    @After
    public void tearDown() throws Exception {
        final AbstractConfiguration configInst = ConfigurationManager.getConfigInstance();
        configInst.clearProperty(AdminConfigImpl.CONTAINER_LISTEN_PORT);
        if (container != null) {
            container.shutdown();
            container = null;
        }
    }

    @Before
    public void init() {
        // Port 0 = pick a free ephemeral port; resources mounted under /jr.
        System.setProperty(AdminConfigImpl.CONTAINER_LISTEN_PORT, "0");
        System.setProperty(AdminConfigImpl.NETFLIX_ADMIN_RESOURCE_CONTEXT, "/jr");
    }

    @Test
    public void goodHealth() throws Exception {
        checkHealth(goodHealthHandler(), Response.Status.OK.getStatusCode());
    }

    @Test
    public void badHealth() throws Exception {
        checkHealth(badHealthHandler(), Response.Status.INTERNAL_SERVER_ERROR.getStatusCode());
    }

    /**
     * Boots the admin container with the given handler and asserts the HTTP
     * status returned by the /healthcheck resource.
     *
     * BUGFIX: the container is now kept on the {@link #container} field so that
     * {@link #tearDown()} shuts it down even when the assertion fails.
     * Previously the field was never assigned (tearDown's null check always
     * passed), so a failed assertion leaked the running container.
     */
    private void checkHealth(HealthCheckHandler healthCheckHandler, int respStatus) throws Exception {
        container = buildAdminResourcesContainer(healthCheckHandler);
        container.init();
        final int adminPort = container.getServerPort();
        HttpClient client = new DefaultHttpClient();
        HttpGet healthGet =
                new HttpGet(String.format("http://localhost:%d/jr/healthcheck", adminPort));
        HttpResponse response = client.execute(healthGet);
        assertEquals("admin resource health check resource failed.", respStatus,
                response.getStatusLine().getStatusCode());
    }

    /**
     * Builds a container and, via reflection, pre-seeds its private
     * "appInjector" field with an injector that supplies the given handler.
     * The local variable no longer shadows the {@link #container} field.
     */
    private AdminResourcesContainer buildAdminResourcesContainer(final HealthCheckHandler healthCheckHandler) throws Exception {
        AdminResourcesContainer resourcesContainer = new AdminResourcesContainer();
        final Field injectorField = AdminResourcesContainer.class.getDeclaredField("appInjector");
        final Injector appInjector = Guice.createInjector(new AbstractModule() {
            @Override
            protected void configure() {
                bind(HealthCheckHandler.class).toInstance(healthCheckHandler);
            }
        });
        injectorField.setAccessible(true);
        injectorField.set(resourcesContainer, appInjector);
        return resourcesContainer;
    }

    /** Handler that always reports healthy (HTTP 200). */
    private HealthCheckHandler goodHealthHandler() {
        return new HealthCheckHandler() {
            @Override
            public int getStatus() {
                return Response.Status.OK.getStatusCode();
            }
        };
    }

    /** Handler that always reports unhealthy (HTTP 500). */
    private HealthCheckHandler badHealthHandler() {
        return new HealthCheckHandler() {
            @Override
            public int getStatus() {
                return Response.Status.INTERNAL_SERVER_ERROR.getStatusCode();
            }
        };
    }
}
| 3,177 |
0 | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-healthcheck-plugin/src/main/java/netflix | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-healthcheck-plugin/src/main/java/netflix/adminresources/HealthCheckResource.java | package netflix.adminresources;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import netflix.karyon.health.HealthCheckHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
@Path("/healthcheck")
@Produces(MediaType.TEXT_HTML)
@Singleton
public class HealthCheckResource {

    private static final Logger logger = LoggerFactory.getLogger(HealthCheckResource.class);

    private HealthCheckHandler healthCheckHandler;

    @Inject
    public HealthCheckResource(HealthCheckHandler healthCheckHandler) {
        this.healthCheckHandler = healthCheckHandler;
    }

    /**
     * Exposes the handler's health status as the HTTP response status; any
     * failure in the handler itself is logged and reported as 400.
     */
    @GET
    public Response getHealthCheck() {
        try {
            return Response.ok().status(healthCheckHandler.getStatus()).build();
        } catch (Exception e) {
            logger.error("Exception in HealthCheckResource -- ", e);
            return Response.status(Response.Status.BAD_REQUEST.getStatusCode()).build();
        }
    }
}
| 3,178 |
0 | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-healthcheck-plugin/src/main/java/netflix | Create_ds/karyon/karyon2-admin-plugins/karyon2-admin-healthcheck-plugin/src/main/java/netflix/adminresources/HealthCheckPlugin.java | package netflix.adminresources;
@AdminPage
public class HealthCheckPlugin extends AbstractAdminPageInfo {

    // Identifier and display name registered with the admin console.
    public static final String PAGE_ID = "karyon2_healthCheck";
    public static final String NAME = "HealthCheck";

    /** Registers the health-check plugin page under its fixed id and name. */
    public HealthCheckPlugin() {
        super(PAGE_ID, NAME);
    }

    /** Jersey package scanned for this plugin's REST resources. */
    @Override
    public String getJerseyResourcePackageList() {
        return "netflix.adminresources";
    }

    /** Hidden from the admin console navigation; only the REST endpoint is exposed. */
    @Override
    public boolean isVisible() {
        return false;
    }
}
| 3,179 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/test/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/test/java/netflix/karyon/jersey/blocking/JsonReadingResource.java | package netflix.karyon.jersey.blocking;
import javax.ws.rs.Consumes;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
@Path("/test")
public class JsonReadingResource {

    private final ObjectMapper mapper = new ObjectMapper();

    /**
     * Parses the posted body as JSON: 200 when it parses, 500 otherwise.
     * Used by the blocking-Jersey integration test as a request sink.
     */
    @SuppressWarnings("unused")
    @POST
    @Produces(MediaType.APPLICATION_JSON)
    @Consumes(MediaType.APPLICATION_JSON)
    public Response processJson(String payload) {
        try {
            System.out.println("processing payload size: '" + payload.length() + "'");
            // Only the parse attempt matters; the resulting tree is discarded.
            mapper.readTree(payload);
            return Response.ok().build();
        } catch (Exception e) {
            System.err.println("ERROR:" + e.getMessage());
            return Response.serverError().build();
        }
    }
}
| 3,180 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/test/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/test/java/netflix/karyon/jersey/blocking/JerseyBlockingModule.java | package netflix.karyon.jersey.blocking;
import static com.netflix.config.ConfigurationManager.getConfigInstance;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import netflix.karyon.KaryonBootstrap;
import rx.Observable;
import netflix.karyon.transport.interceptor.DuplexInterceptor;
import com.google.inject.AbstractModule;
import com.google.inject.Singleton;
import com.netflix.governator.annotations.Modules;
/**
 * Karyon bootstrap definition for the blocking-Jersey integration test:
 * configures the Jersey package scan and a Karyon server on port 7001.
 */
@KaryonBootstrap( name = "jersey-blocking" )
@Singleton
@Modules(include = {
        JerseyBlockingModule.TestModule.class,
        JerseyBlockingModule.KaryonRxRouterModuleImpl.class,
})
public interface JerseyBlockingModule {

    /** Points Jersey's package scan at this test package via an Archaius property. */
    class TestModule extends AbstractModule {
        @Override
        protected void configure() {
            getConfigInstance().addProperty("com.sun.jersey.config.property.packages", "netflix.karyon.jersey.blocking");
        }
    }

    /** Server configuration: port 7001 with a 200-thread blocking pool. */
    class KaryonRxRouterModuleImpl extends KaryonJerseyModule {
        @Override
        protected void configureServer() {
            server().port( 7001 ).threadPoolSize( 200 );
        }
    }

    /**
     * No-op request/response interceptor; both directions complete immediately.
     * NOTE(review): not referenced by the bindings above — presumably kept as an
     * example; confirm before removing.
     */
    public class AccessInterceptor implements DuplexInterceptor<HttpServerRequest<ByteBuf>, HttpServerResponse<ByteBuf>> {
        @Override
        public Observable<Void> in(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
            return Observable.empty();
        }

        @Override
        public Observable<Void> out(HttpServerResponse<ByteBuf> response) {
            return Observable.empty();
        }
    }
}
| 3,181 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/test/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/test/java/netflix/karyon/jersey/blocking/JerseyBlockingTest.java | package netflix.karyon.jersey.blocking;
import static org.junit.Assert.assertEquals;
import io.netty.util.ResourceLeakDetector;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Filter;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
import netflix.karyon.Karyon;
import netflix.karyon.KaryonServer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import com.netflix.governator.guice.BootstrapModule;
/**
 * Integration/stress test for the blocking Jersey container: fires many
 * concurrent and sequential JSON POSTs at the embedded Karyon server and
 * fails if any request errors or Netty's resource-leak detector reports a
 * buffer leak.
 */
public class JerseyBlockingTest {

    private static KaryonServer server;
    // NOTE(review): never written or read in this class — candidate for removal.
    static ByteArrayOutputStream buffer;

    /** Boots the Karyon server (port 7001, see JerseyBlockingModule) once for the class. */
    @BeforeClass
    public static void setUpBefore() throws Exception {
        server = Karyon.forApplication( JerseyBlockingModule.class, (BootstrapModule[])null );
        server.start();
    }

    @AfterClass
    public static void cleanUpAfter() throws Exception {
        server.shutdown();
    }

    /**
     * POSTs {@code payload} as JSON to {@code path} and returns the HTTP status.
     * On 200 the response body is drained so the connection can be reused.
     * NOTE(review): neither the input nor output stream is closed and the
     * connection is never disconnected — relies on GC/keep-alive; confirm.
     */
    private static int postData( String path, String payload ) throws IOException {
        URL url = new URL( path );
        HttpURLConnection con = (HttpURLConnection)url.openConnection();
        con.setRequestMethod("POST");
        con.setRequestProperty("Content-Type","application/json");
        con.setDoOutput(true);
        con.setDoInput(true);
        con.setConnectTimeout( 10000 );
        con.setReadTimeout( 20000 );
        con.getOutputStream().write( payload.getBytes("UTF-8") );
        con.getOutputStream().flush();
        con.getOutputStream().close();
        int status = con.getResponseCode();
        if( status != 200 ) {
            return status;
        }
        //read the response
        byte[] buffer = new byte[ 1024 ];
        while( con.getInputStream().read( buffer ) > 0 ) {
            ;
        }
        return 200;
    }

    /**
     * Builds a JSON object {"key":"..."} whose value is {@code size}
     * concatenated decimal byte values (i & 0xFF).
     */
    private String makePayload( int size ) {
        StringBuilder buffer = new StringBuilder();
        buffer.append("{\"key\":\"");
        for( int i = 0; i < size; ++i ) {
            buffer.append(( Byte.toString( (byte)( i & 0xFF ) ) ) );
        }
        return buffer.append("\"}").toString();
    }

    /**
     * Drives the server with 200 concurrent random-size POSTs followed by 100
     * sequential larger ones, with Netty's leak detector at PARANOID; any
     * non-200 response, exception, or detected buffer leak increments
     * {@code errors}, which must end at zero.
     */
    @Test
    public void runJerseyTest() throws InterruptedException {
        ExecutorService service = Executors.newCachedThreadPool();
        final Random rnd = new Random();
        final AtomicInteger errors = new AtomicInteger();
        //let Netty blow in our face
        ResourceLeakDetector.setLevel( ResourceLeakDetector.Level.PARANOID );
        //tap to the logger, so we can catch leak error
        // Leak reports are logged (not thrown), so intercept the detector's
        // logger and count SEVERE records mentioning "LEAK" as test errors.
        Logger.getLogger( "io.netty.util.ResourceLeakDetector" ).setFilter( new Filter() {
            @Override
            public boolean isLoggable(LogRecord record) {
                if( record.getLevel() == Level.SEVERE && record.getMessage().contains("LEAK") ) {
                    errors.incrementAndGet();
                }
                return true;
            }
        });
        // Phase 1: 200 concurrent small requests, staggered by up to 100 ms each.
        for( int i = 0; i < 200; ++i ) {
            service.execute( new Runnable() {
                @Override
                public void run() {
                    try {
                        int response = postData("http://localhost:7001/test", makePayload( Math.max(1, rnd.nextInt( 1024 ) ) ) );
                        if( response != 200 ) {
                            errors.addAndGet( 1 );
                        }
                    }
                    catch( Exception e ) {
                        errors.addAndGet( 1 );
                    }
                }
            });
            Thread.sleep( rnd.nextInt( 100 ) );
        }
        //aid netty leak detection
        System.gc();
        // Phase 2: 100 sequential larger requests, GC after each to surface leaks.
        for( int i = 0; i < 100; ++i ) {
            try {
                //do not exceeded Netty content length ~1M
                int response = postData("http://localhost:7001/test", makePayload( Math.max(1, rnd.nextInt( 127 * 1024 ) ) ) );
                if( response != 200 ) {
                    errors.addAndGet( 1 );
                }
            }
            catch( Exception e ) {
                errors.addAndGet( 1 );
            }
            //aid netty leak detection
            System.gc();
        }
        service.shutdown();
        service.awaitTermination( 100, TimeUnit.SECONDS );
        assertEquals( "Errors: ", 0, errors.intValue() );
    }
}
| 3,182 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey/blocking/ServiceIteratorProviderImpl.java | package netflix.karyon.jersey.blocking;
import com.google.common.collect.Iterators;
import com.sun.jersey.spi.container.ContainerProvider;
import com.sun.jersey.spi.service.ServiceFinder;
import java.util.Iterator;
/**
* @author Nitesh Kant
*/
/**
 * A {@link ServiceFinder.ServiceIteratorProvider} that delegates to Jersey's
 * default META-INF/services discovery and additionally appends a
 * {@link NettyContainerProvider} when {@link ContainerProvider}s are looked up.
 */
class ServiceIteratorProviderImpl<T> extends ServiceFinder.ServiceIteratorProvider<T> {
static {
/**
 * This iterator provider override makes it possible to not mandate the presence of a jar with a META-INF/ based
 * Service provider discovery which is the default for jersey.
 */
ServiceFinder.setIteratorProvider(new ServiceIteratorProviderImpl());
}
// NOTE(review): this iterator is one-shot — a second ContainerProvider lookup
// would see it already exhausted; verify the lookup happens only once.
@SuppressWarnings("rawtypes")
private static final Iterator<? extends ContainerProvider> nettyContainerProviderIter =
Iterators.singletonIterator(new NettyContainerProvider());
private final ServiceFinder.DefaultServiceIteratorProvider<T> defaultProvider;
ServiceIteratorProviderImpl() {
defaultProvider = new ServiceFinder.DefaultServiceIteratorProvider<T>();
}
/** No-op; merely loading this class runs the static registration block above. */
public static void registerWithJersey() {
// Static block does the register.
}
@Override
@SuppressWarnings("unchecked")
public Iterator<T> createIterator(Class<T> service, String serviceName, ClassLoader loader,
boolean ignoreOnClassNotFound) {
Iterator<T> defaultIterator = defaultProvider.createIterator(service, serviceName, loader, ignoreOnClassNotFound);
// Append our provider only when the requested service type covers it.
if (service.isAssignableFrom(NettyContainerProvider.class)) {
return (Iterator<T>) Iterators.concat(defaultIterator, nettyContainerProviderIter);
}
return defaultIterator;
}
@Override
@SuppressWarnings("unchecked")
public Iterator<Class<T>> createClassIterator(Class<T> service, String serviceName, ClassLoader loader,
boolean ignoreOnClassNotFound) {
// Class-based discovery is delegated unchanged to the default provider.
return defaultProvider.createClassIterator(service, serviceName, loader, ignoreOnClassNotFound);
}
}
| 3,183 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey/blocking/NettyContainer.java | package netflix.karyon.jersey.blocking;
import com.sun.jersey.spi.container.WebApplication;
/**
* @author Nitesh Kant
*/
/**
 * A minimal Jersey container for Netty. It simply holds the Jersey
 * {@link WebApplication} together with the bridge that translates Netty
 * requests/responses into their Jersey counterparts.
 */
public class NettyContainer {

    private final WebApplication application;
    private final NettyToJerseyBridge nettyToJerseyBridge;

    public NettyContainer(WebApplication application) {
        this.application = application;
        this.nettyToJerseyBridge = new NettyToJerseyBridge(application);
    }

    /** The application this container serves. */
    WebApplication getApplication() {
        return application;
    }

    /** Bridge used to adapt Netty I/O objects to Jersey's API. */
    NettyToJerseyBridge getNettyToJerseyBridge() {
        return nettyToJerseyBridge;
    }
}
| 3,184 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey/blocking/JerseyBasedRouter.java | package netflix.karyon.jersey.blocking;
import com.google.inject.Injector;
import com.sun.jersey.api.container.ContainerFactory;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.guice.spi.container.GuiceComponentProviderFactory;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerResponseWriter;
import com.sun.jersey.spi.container.WebApplication;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import io.reactivex.netty.protocol.http.server.RequestHandler;
import java.io.InputStream;
import netflix.karyon.transport.util.HttpContentInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscriber;
import rx.functions.Action0;
import rx.schedulers.Schedulers;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import java.io.IOException;
/**
 * A karyon {@link RequestHandler} that dispatches every HTTP request into a
 * blocking Jersey {@link WebApplication}. Dispatch happens on the Rx I/O
 * scheduler so the Netty event loop is never blocked.
 *
 * @author Nitesh Kant
 */
public class JerseyBasedRouter implements RequestHandler<ByteBuf, ByteBuf> {
private static final Logger logger = LoggerFactory.getLogger(JerseyBasedRouter.class);
private final ResourceConfig resourceConfig;
// May be null (no-arg constructor); then the container is created without Guice integration.
private final Injector injector;
// Both fields below are initialized in start(); the router is unusable before
// the @PostConstruct lifecycle callback has run.
private WebApplication application;
private NettyToJerseyBridge nettyToJerseyBridge;
public JerseyBasedRouter() {
this(null);
}
@Inject
public JerseyBasedRouter(Injector injector) {
this.injector = injector;
resourceConfig = new PropertiesBasedResourceConfig();
ServiceIteratorProviderImpl.registerWithJersey();
}
@Override
public Observable<Void> handle(final HttpServerRequest<ByteBuf> request, final HttpServerResponse<ByteBuf> response) {
/*
 * Creating the Container request eagerly, subscribes to the request content eagerly. Failure to do so, will
 * result in expiring/loss of content.
 */
//we have to close input stream, to emulate normal lifecycle
final InputStream requestData = new HttpContentInputStream( response.getAllocator(), request.getContent() );
final ContainerRequest containerRequest = nettyToJerseyBridge.bridgeRequest( request, requestData );
final ContainerResponseWriter containerResponse = nettyToJerseyBridge.bridgeResponse(response);
return Observable.create(new Observable.OnSubscribe<Void>() {
@Override
public void call(Subscriber<? super Void> subscriber) {
try {
// Blocking dispatch into Jersey; runs on Schedulers.io() (see subscribeOn below).
application.handleRequest(containerRequest, containerResponse);
subscriber.onCompleted();
} catch (IOException e) {
logger.error("Failed to handle request.", e);
subscriber.onError(e);
}
finally {
//close input stream and release all data we buffered, ignore errors
try {
requestData.close();
}
catch( IOException e ) {
// intentionally ignored: best-effort cleanup of buffered request content
}
}
}
}).doOnTerminate(new Action0() {
@Override
public void call() {
response.close(true); /* Since this runs in a different thread, it needs an explicit flush,
else the LastHttpContent will never be flushed and the client will not finish.*/
}
}).subscribeOn(Schedulers.io()) /*Since this blocks on subscription*/;
}
/** Lifecycle start: builds the Jersey container (with Guice support when an injector is present). */
@PostConstruct
public void start() {
NettyContainer container;
if (null != injector) {
container = ContainerFactory.createContainer(NettyContainer.class, resourceConfig,
new GuiceComponentProviderFactory(resourceConfig, injector));
} else {
container = ContainerFactory.createContainer(NettyContainer.class, resourceConfig);
}
application = container.getApplication();
nettyToJerseyBridge = container.getNettyToJerseyBridge();
logger.info("Started Jersey based request router.");
}
/** Lifecycle stop: tears down the Jersey application. */
@PreDestroy
public void stop() {
logger.info("Stopped Jersey based request router.");
application.destroy();
}
}
| 3,185 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey/blocking/NettyContainerProvider.java | package netflix.karyon.jersey.blocking;
import com.google.common.base.Preconditions;
import com.sun.jersey.api.container.ContainerException;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.spi.container.ContainerProvider;
import com.sun.jersey.spi.container.WebApplication;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Jersey {@link ContainerProvider} that creates {@link NettyContainer}
 * instances. Registered via {@link ServiceIteratorProviderImpl} rather than
 * META-INF/services discovery.
 *
 * @author Nitesh Kant
 */
public class NettyContainerProvider implements ContainerProvider<NettyContainer> {

    private static final Logger logger = LoggerFactory.getLogger(NettyContainerProvider.class);

    /**
     * Creates a {@link NettyContainer} for the given application.
     *
     * @return the container, or {@code null} when the requested container type
     *         is not {@link NettyContainer} (per the {@link ContainerProvider}
     *         contract, returning null lets the factory try other providers).
     */
    @Override
    public NettyContainer createContainer(Class<NettyContainer> type, ResourceConfig resourceConfig,
                                          WebApplication application) throws ContainerException {
        Preconditions.checkNotNull(type);
        Preconditions.checkNotNull(application);
        if (!type.equals(NettyContainer.class)) {
            logger.error(
                    "Netty container provider can only create container of type {}. Invoked to create container of type {}",
                    NettyContainer.class.getName(), type.getName());
            // Fix: previously a NettyContainer was returned even for unsupported
            // types; signal "unsupported" instead so the factory can continue.
            return null;
        }
        return new NettyContainer(application);
    }
}
| 3,186 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey/blocking/NettyToJerseyBridge.java | package netflix.karyon.jersey.blocking;
import com.sun.jersey.core.header.InBoundHeaders;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseWriter;
import com.sun.jersey.spi.container.WebApplication;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.reactivex.netty.protocol.http.server.HttpRequestHeaders;
import io.reactivex.netty.protocol.http.server.HttpResponseHeaders;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Translates RxNetty HTTP request/response objects into their Jersey
 * (com.sun.jersey) counterparts for a single {@link WebApplication}.
 * Package-private; used only by {@link JerseyBasedRouter} and {@link NettyContainer}.
 *
 * @author Nitesh Kant
 */
final class NettyToJerseyBridge {
private static final Logger logger = LoggerFactory.getLogger(NettyToJerseyBridge.class);
private final WebApplication application;
NettyToJerseyBridge(WebApplication application) {
this.application = application;
}
/**
 * Builds a Jersey {@link ContainerRequest} from a Netty request, with the
 * request body supplied by {@code requestData}.
 *
 * @throws IllegalArgumentException (wrapping {@link URISyntaxException})
 *         when the request URI cannot be parsed.
 */
ContainerRequest bridgeRequest(final HttpServerRequest<ByteBuf> nettyRequest, InputStream requestData ) {
try {
URI baseUri = new URI("/"); // Since the netty server does not have a context path element as such, so base uri is always /
URI uri = new URI(nettyRequest.getUri());
return new ContainerRequest(application, nettyRequest.getHttpMethod().name(),
baseUri, uri, new JerseyRequestHeadersAdapter(nettyRequest.getHeaders()),
requestData );
} catch (URISyntaxException e) {
logger.error(String.format("Invalid request uri: %s", nettyRequest.getUri()), e);
throw new IllegalArgumentException(e);
}
}
/**
 * Wraps the Netty response in a Jersey {@link ContainerResponseWriter}.
 * The whole response body is accumulated into one ByteBuf and written out
 * in a single writeAndFlush when Jersey calls {@code finish()}.
 */
ContainerResponseWriter bridgeResponse(final HttpServerResponse<ByteBuf> serverResponse) {
return new ContainerResponseWriter() {
private final ByteBuf contentBuffer = serverResponse.getChannel().alloc().buffer();
@Override
public OutputStream writeStatusAndHeaders(long contentLength, ContainerResponse response) {
// contentLength is unused here; the buffered single write below lets the
// transport determine the actual length.
int responseStatus = response.getStatus();
serverResponse.setStatus(HttpResponseStatus.valueOf(responseStatus));
HttpResponseHeaders responseHeaders = serverResponse.getHeaders();
for(Map.Entry<String, List<Object>> header : response.getHttpHeaders().entrySet()){
responseHeaders.setHeader(header.getKey(), header.getValue());
}
return new ByteBufOutputStream(contentBuffer);
}
@Override
public void finish() {
serverResponse.writeAndFlush(contentBuffer);
}
};
}
/**
 * Read-only adaptation of Netty's {@link HttpRequestHeaders} to the
 * {@link InBoundHeaders} type Jersey expects. Every mutating operation
 * throws {@link UnsupportedOperationException}. The entrySet()/values()
 * views are computed lazily and cached (synchronized on first access).
 */
private static class JerseyRequestHeadersAdapter extends InBoundHeaders {
private static final long serialVersionUID = 2303297923762115950L;
private final HttpRequestHeaders requestHeaders;
// Lazily-built caches; see entrySet() and values().
private Set<Map.Entry<String, List<String>>> entrySet;
private Collection<List<String>> values;
private JerseyRequestHeadersAdapter(HttpRequestHeaders requestHeaders) {
this.requestHeaders = requestHeaders;
}
@Override
public void putSingleObject(String key, Object value) {
throw new UnsupportedOperationException("No modifications allowed on request headers."); // The API is sad
}
@Override
public void addObject(String key, Object value) {
throw new UnsupportedOperationException("No modifications allowed on request headers."); // The API is sad
}
@Override
public <A> List<A> get(String key, Class<A> type) {
// Only String values are representable; any other requested type yields an empty list.
if (!type.isAssignableFrom(String.class)) {
return Collections.emptyList();
}
@SuppressWarnings("unchecked")
List<A> values = (List<A>) requestHeaders.getAll(key);
return values;
}
@Override
public <A> A getFirst(String key, Class<A> type) {
List<A> values = get(key, type);
return null != values && !values.isEmpty() ? values.get(0) : null;
}
@Override
public <A> A getFirst(String key, A defaultValue) {
@SuppressWarnings("unchecked")
A value = (A) getFirst(key, defaultValue.getClass());
return null != value ? value : defaultValue;
}
@Override
public void putSingle(String key, String value) {
throw new UnsupportedOperationException("No modifications allowed on request headers."); // The API is sad
}
@Override
public void add(String key, String value) {
throw new UnsupportedOperationException("No modifications allowed on request headers."); // The API is sad
}
@Override
public String getFirst(String key) {
return getFirst(key, String.class);
}
@Override
protected List<String> getList(String key) {
return get(key, String.class);
}
@Override
public boolean containsValue(Object value) {
// Linear scan over all header entries.
List<Map.Entry<String, String>> entries = requestHeaders.entries();
for (Map.Entry<String, String> entry : entries) {
if (value.equals(entry.getValue())) {
return true;
}
}
return false;
}
@Override
public List<String> get(Object key) {
return getList(String.valueOf(key));
}
@Override
public void clear() {
throw new UnsupportedOperationException("No modifications allowed on request headers."); // The API is sad
}
@Override
protected boolean removeEldestEntry(Map.Entry<String, List<String>> eldest) {
throw new UnsupportedOperationException("No modifications allowed on request headers."); // The API is sad
}
@Override
public int size() {
// Number of distinct header names, not total values.
return requestHeaders.names().size();
}
@Override
public boolean isEmpty() {
return requestHeaders.names().isEmpty();
}
@Override
public boolean containsKey(Object key) {
return requestHeaders.contains(String.valueOf(key));
}
@Override
public List<String> put(String key, List<String> value) {
throw new UnsupportedOperationException("No modifications allowed on request headers.");
}
@Override
public void putAll(Map<? extends String, ? extends List<String>> m) {
throw new UnsupportedOperationException("No modifications allowed on request headers.");
}
@Override
public List<String> remove(Object key) {
throw new UnsupportedOperationException("No modifications allowed on request headers.");
}
@Override
public synchronized Set<Map.Entry<String, List<String>>> entrySet() {
// Built once from a snapshot of the headers; each value is a single-element list.
if (null != entrySet) {
return entrySet;
}
List<Map.Entry<String, String>> entries = requestHeaders.entries();
entrySet = new HashSet<Map.Entry<String, List<String>>>(entries.size());
for (final Map.Entry<String, String> entry : entries) {
ArrayList<String> listValue = new ArrayList<String>();
listValue.add(entry.getValue());
entrySet.add(new SimpleEntry<String, List<String>>(entry.getKey(), listValue));
}
return entrySet;
}
@Override
public Set<String> keySet() {
return requestHeaders.names();
}
@Override
public synchronized Collection<List<String>> values() {
// Built once: one value list per distinct header name.
if (null != values) {
return values;
}
values = new ArrayList<List<String>>();
for (String headerName : requestHeaders.names()) {
values.add(requestHeaders.getAll(headerName));
}
return values;
}
@Override
public boolean equals(Object o) {
return requestHeaders.equals(o);
}
@Override
public int hashCode() {
return requestHeaders.hashCode();
}
@Override
public String toString() {
return requestHeaders.toString();
}
}
}
| 3,187 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey/blocking/PropertiesBasedResourceConfig.java | package netflix.karyon.jersey.blocking;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.api.core.ScanningResourceConfig;
import com.sun.jersey.core.spi.scanning.PackageNamesScanner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.MediaType;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import static com.netflix.config.ConfigurationManager.getConfigInstance;
/**
* An implementation of {@link ResourceConfig} that enables users to define all jersey properties in a property file
* loaded by karyon via archaius. <br>
* This supports scanning of classpath (using {@link ScanningResourceConfig}) to discover provider and other resource
* classes. The scanning of classpath is done lazily, at the first call to {@link #getClasses()} in order to make sure
* that we do not do scanning too early, even before all properties are loaded.
*
* @author Nitesh Kant
*/
public class PropertiesBasedResourceConfig extends ScanningResourceConfig {

    private static final Logger logger = LoggerFactory.getLogger(PropertiesBasedResourceConfig.class);

    /** All archaius properties under this prefix are handed to jersey. */
    private static final String JERSEY_ROOT_PACKAGE = "com.sun.jersey";

    private volatile boolean initialized;

    @Override
    public Set<Class<?>> getClasses() {
        initIfRequired();
        return super.getClasses();
    }

    @Override
    public Set<Object> getSingletons() {
        initIfRequired();
        return super.getSingletons();
    }

    @Override
    public Map<String, MediaType> getMediaTypeMappings() {
        initIfRequired();
        return super.getMediaTypeMappings();
    }

    @Override
    public Map<String, String> getLanguageMappings() {
        initIfRequired();
        return super.getLanguageMappings();
    }

    @Override
    public Map<String, Object> getExplicitRootResources() {
        initIfRequired();
        return super.getExplicitRootResources();
    }

    @Override
    public Map<String, Boolean> getFeatures() {
        initIfRequired();
        return super.getFeatures();
    }

    @Override
    public boolean getFeature(String featureName) {
        initIfRequired();
        return super.getFeature(featureName);
    }

    @Override
    public Map<String, Object> getProperties() {
        initIfRequired();
        return super.getProperties();
    }

    @Override
    public Object getProperty(String propertyName) {
        initIfRequired();
        return super.getProperty(propertyName);
    }

    /**
     * Lazily scans the configured packages and loads jersey properties from
     * archaius. Synchronized so concurrent first calls initialize only once.
     * NOTE(review): {@code initialized} is set before the scan runs, so a
     * failing scan is not retried on a later call — confirm this is intended.
     */
    private synchronized void initIfRequired() {
        if (initialized) {
            return;
        }
        initialized = true;
        String pkgNamesStr = getConfigInstance().getString(PackagesResourceConfig.PROPERTY_PACKAGES, null);
        if (null == pkgNamesStr) {
            logger.warn("No property defined with name: " + PackagesResourceConfig.PROPERTY_PACKAGES +
                        ", this means that jersey can not find any of your resource/provider classes.");
        } else {
            String[] pkgNames = getElements(new String[]{pkgNamesStr}, ResourceConfig.COMMON_DELIMITERS);
            logger.info("Packages to scan by jersey {}", Arrays.toString(pkgNames));
            init(new PackageNamesScanner(pkgNames));
        }
        Map<String, Object> jerseyProperties = createPropertiesMap();
        setPropertiesAndFeatures(jerseyProperties);
    }

    /** Snapshots every com.sun.jersey.* archaius property into a read-only map. */
    private static Map<String, Object> createPropertiesMap() {
        Properties properties = new Properties();
        Iterator<String> iter = getConfigInstance().getKeys(JERSEY_ROOT_PACKAGE);
        while (iter.hasNext()) {
            String key = iter.next();
            properties.setProperty(key, getConfigInstance().getString(key));
        }
        return new TypeSafePropertiesDelegate(properties);
    }

    /**
     * A read-only {@code Map<String, Object>} view over a {@link Properties}
     * instance. Entry values are resolved from the backing properties at read
     * time; all write operations throw {@link UnsupportedOperationException}.
     */
    private static class TypeSafePropertiesDelegate implements Map<String, Object> {

        private final Properties properties;
        // Entry set is built once, holding only the property names; values are
        // looked up from the backing Properties instance on each getValue().
        private final Set<Entry<String, Object>> entrySet;

        public TypeSafePropertiesDelegate(Properties properties) {
            this.properties = properties;
            entrySet = new HashSet<Entry<String, Object>>(properties.size());
            for (final String propName : properties.stringPropertyNames()) {
                entrySet.add(new Entry<String, Object>() {
                    @Override
                    public String getKey() {
                        return propName;
                    }
                    @Override
                    public Object getValue() {
                        return TypeSafePropertiesDelegate.this.properties.getProperty(propName);
                    }
                    @Override
                    public Object setValue(Object value) {
                        throw new UnsupportedOperationException("Writes are not supported on jersey features and properties map.");
                    }
                });
            }
        }

        @Override
        public int size() {
            return properties.size();
        }

        @Override
        public boolean isEmpty() {
            return properties.isEmpty();
        }

        @Override
        public boolean containsKey(Object key) {
            // Fix: previously called Hashtable.contains(key), which tests
            // VALUES, not keys; containsKey is the correct key lookup.
            return properties.containsKey(key);
        }

        @Override
        public boolean containsValue(Object value) {
            return properties.containsValue(value);
        }

        @Override
        public Object get(Object key) {
            return properties.getProperty(String.valueOf(key));
        }

        @Override
        public Object put(String key, Object value) {
            throw new UnsupportedOperationException("Writes are not supported on jersey features and properties map.");
        }

        @Override
        public Object remove(Object key) {
            throw new UnsupportedOperationException("Writes are not supported on jersey features and properties map.");
        }

        @Override
        public void putAll(Map<? extends String, ?> m) {
            throw new UnsupportedOperationException("Writes are not supported on jersey features and properties map.");
        }

        @Override
        public void clear() {
            throw new UnsupportedOperationException("Writes are not supported on jersey features and properties map.");
        }

        @Override
        public Set<String> keySet() {
            return properties.stringPropertyNames();
        }

        @Override
        public Collection<Object> values() {
            return properties.values();
        }

        @Override
        public Set<Entry<String, Object>> entrySet() {
            return entrySet;
        }
    }
}
| 3,188 |
0 | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey | Create_ds/karyon/karyon2-jersey-blocking/src/main/java/netflix/karyon/jersey/blocking/KaryonJerseyModule.java | package netflix.karyon.jersey.blocking;
import io.netty.buffer.ByteBuf;
import netflix.karyon.transport.http.KaryonHttpModule;
/**
 * Base karyon HTTP module that routes every request through the blocking
 * Jersey router ({@link JerseyBasedRouter}).
 *
 * @author Nitesh Kant
 */
public abstract class KaryonJerseyModule extends KaryonHttpModule<ByteBuf, ByteBuf> {

    public KaryonJerseyModule() {
        this("karyonJerseyModule");
    }

    protected KaryonJerseyModule(String moduleName) {
        super(moduleName, ByteBuf.class, ByteBuf.class);
    }

    @Override
    protected void configure() {
        // Install the Jersey router before the base HTTP wiring.
        bindRouter().to(JerseyBasedRouter.class);
        super.configure();
    }
}
| 3,189 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/jersey/HelloworldResource.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package netflix.karyon.examples.hellonoss.server.jersey;
import com.google.inject.Singleton;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
@Singleton
@Path("/hello")
public class HelloworldResource {
private static final Logger logger = LoggerFactory.getLogger(HelloworldResource.class);
@Path("to/{name}")
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response helloTo(@PathParam("name") String name) {
JSONObject response = new JSONObject();
try {
response.put("Message", "Hello " + name + " from Netflix OSS");
return Response.ok(response.toString()).build();
} catch (JSONException e) {
logger.error("Error creating json response.", e);
return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
}
}
@Path("to/person")
@POST
@Produces(MediaType.APPLICATION_JSON)
public Response helloToPerson(String name) {
JSONObject response = new JSONObject();
try {
response.put("Message", "Hello " + name + " from Netflix OSS");
return Response.ok(response.toString()).build();
} catch (JSONException e) {
logger.error("Error creating json response.", e);
return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
}
}
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response hello() {
JSONObject response = new JSONObject();
try {
response.put("Message", "Hello from Netflix OSS");
return Response.ok(response.toString()).build();
} catch (JSONException e) {
logger.error("Error creating json response.", e);
return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
}
}
} | 3,190 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/jersey/HealthcheckResource.java | package netflix.karyon.examples.hellonoss.server.jersey;
import com.google.inject.Inject;
import netflix.karyon.health.HealthCheckHandler;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * Exposes the application's {@link HealthCheckHandler} status as the HTTP
 * status of GET /healthcheck.
 *
 * @author Nitesh Kant
 */
@Path("/healthcheck")
public class HealthcheckResource {

    private final HealthCheckHandler handler;

    @Inject
    public HealthcheckResource(HealthCheckHandler healthCheckHandler) {
        this.handler = healthCheckHandler;
    }

    @GET
    @Produces(MediaType.APPLICATION_JSON)
    public Response healthcheck() {
        // The handler's status code becomes the response status directly.
        return Response.status(handler.getStatus()).build();
    }
}
| 3,191 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/jersey/JerseyHelloWorldApp.java | package netflix.karyon.examples.hellonoss.server.jersey;
import com.netflix.governator.annotations.Modules;
import netflix.adminresources.resources.KaryonWebAdminModule;
import netflix.karyon.KaryonBootstrap;
import netflix.karyon.ShutdownModule;
import netflix.karyon.archaius.ArchaiusBootstrap;
import netflix.karyon.examples.hellonoss.common.LoggingInterceptor;
import netflix.karyon.examples.hellonoss.common.auth.AuthInterceptor;
import netflix.karyon.examples.hellonoss.common.auth.AuthenticationService;
import netflix.karyon.examples.hellonoss.common.auth.AuthenticationServiceImpl;
import netflix.karyon.examples.hellonoss.common.health.HealthCheck;
import netflix.karyon.examples.hellonoss.server.jersey.JerseyHelloWorldApp.KaryonJerseyModuleImpl;
import netflix.karyon.jersey.blocking.KaryonJerseyModule;
import netflix.karyon.servo.KaryonServoModule;
/**
 * Annotation-driven karyon application definition: archaius configuration,
 * health check, web-admin, servo metrics and the Jersey HTTP module below.
 */
@ArchaiusBootstrap
@KaryonBootstrap(name = "hello-netflix-oss", healthcheck = HealthCheck.class)
@Modules(include = {
ShutdownModule.class,
KaryonWebAdminModule.class,
// KaryonEurekaModule.class, // Uncomment this to enable Eureka client.
KaryonJerseyModuleImpl.class,
KaryonServoModule.class
})
public interface JerseyHelloWorldApp {
/** Jersey server wiring: interceptors, auth binding and server settings. */
class KaryonJerseyModuleImpl extends KaryonJerseyModule {
@Override
protected void configureServer() {
bind(AuthenticationService.class).to(AuthenticationServiceImpl.class);
// Log every request; require auth only on /hello (inbound interception).
interceptorSupport().forUri("/*").intercept(LoggingInterceptor.class);
interceptorSupport().forUri("/hello").interceptIn(AuthInterceptor.class);
server().port(8888).threadPoolSize(100);
}
}
}
| 3,192 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple/SimpleRouter.java | package netflix.karyon.examples.hellonoss.server.simple;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import io.reactivex.netty.protocol.http.server.RequestHandler;
import netflix.karyon.examples.hellonoss.common.health.HealthCheck;
import netflix.karyon.transport.http.SimpleUriRouter;
import netflix.karyon.transport.http.health.HealthCheckEndpoint;
import rx.Observable;
/**
 * A {@link RequestHandler} that dispatches by URI to the health check and the
 * hello-world endpoints.
 *
 * @author Nitesh Kant
 */
public class SimpleRouter implements RequestHandler<ByteBuf, ByteBuf> {

    private final SimpleUriRouter<ByteBuf, ByteBuf> router;

    public SimpleRouter() {
        final HelloWorldEndpoint endpoint = new HelloWorldEndpoint();

        // Handler for GET /hello: plain greeting.
        RequestHandler<ByteBuf, ByteBuf> helloHandler = new RequestHandler<ByteBuf, ByteBuf>() {
            @Override
            public Observable<Void> handle(HttpServerRequest<ByteBuf> request,
                                           HttpServerResponse<ByteBuf> response) {
                return endpoint.sayHello(response);
            }
        };

        // Handler for /hello/to/{name}: personalized greeting.
        RequestHandler<ByteBuf, ByteBuf> helloToHandler = new RequestHandler<ByteBuf, ByteBuf>() {
            @Override
            public Observable<Void> handle(HttpServerRequest<ByteBuf> request,
                                           HttpServerResponse<ByteBuf> response) {
                return endpoint.sayHelloToUser(request, response);
            }
        };

        router = new SimpleUriRouter<ByteBuf, ByteBuf>();
        router.addUri("/healthcheck", new HealthCheckEndpoint(new HealthCheck()));
        router.addUri("/hello", helloHandler);
        router.addUri("/hello/to/*", helloToHandler);
    }

    @Override
    public Observable<Void> handle(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
        return router.handle(request, response);
    }
}
| 3,193 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple/HelloWorldEndpoint.java | package netflix.karyon.examples.hellonoss.server.simple;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.reactivex.netty.channel.StringTransformer;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
 * Endpoint implementations for the hello-world routes.
 *
 * @author Tomasz Bak
 */
public class HelloWorldEndpoint {

    private static final Logger logger = LoggerFactory.getLogger(HelloWorldEndpoint.class);

    /** Writes {"Message": "Hello from Netflix OSS"} and completes the response. */
    public Observable<Void> sayHello(HttpServerResponse<ByteBuf> response) {
        try {
            JSONObject content = new JSONObject();
            content.put("Message", "Hello from Netflix OSS");
            response.write(content.toString(), StringTransformer.DEFAULT_INSTANCE);
        } catch (JSONException e) {
            logger.error("Error creating json response.", e);
            return Observable.error(e);
        }
        return response.close();
    }

    /**
     * Greets the user named in the path /hello/to/{username}; responds 400
     * with an error body when no username is present.
     */
    public Observable<Void> sayHelloToUser(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
        String userName = request.getPath().substring("/hello/to".length());
        JSONObject content = new JSONObject();
        try {
            // Empty → path was "/hello/to"; length 1 → "/hello/to/" with no name.
            if (userName.isEmpty() || userName.length() == 1) {
                response.setStatus(HttpResponseStatus.BAD_REQUEST);
                content.put("Error", "Please provide a username to say hello. The URI should be /hello/to/{username}");
            } else {
                // Drop the leading '/' before the username.
                content.put("Message", "Hello " + userName.substring(1) + " from Netflix OSS");
            }
        } catch (JSONException e) {
            logger.error("Error creating json response.", e);
            return Observable.error(e);
        }
        response.write(content.toString(), StringTransformer.DEFAULT_INSTANCE);
        return response.close();
    }
}
| 3,194 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple/module/RouterWithInterceptors.java | package netflix.karyon.examples.hellonoss.server.simple.module;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import io.reactivex.netty.protocol.http.server.RequestHandler;
import netflix.karyon.examples.hellonoss.common.LoggingInterceptor;
import netflix.karyon.examples.hellonoss.common.auth.AuthInterceptor;
import netflix.karyon.examples.hellonoss.common.auth.AuthenticationServiceImpl;
import netflix.karyon.examples.hellonoss.server.simple.SimpleRouter;
import netflix.karyon.transport.http.HttpInterceptorSupport;
import netflix.karyon.transport.http.HttpRequestHandler;
import rx.Observable;
/**
 * A {@link RequestHandler} that decorates {@link SimpleRouter} with an interceptor
 * chain: every URI is logged, and {@code /hello} additionally requires
 * authentication.
 *
 * @author Nitesh Kant
 */
public class RouterWithInterceptors implements RequestHandler<ByteBuf, ByteBuf> {

    /** Interceptor-aware handler wrapping the plain router; all requests go through it. */
    private final HttpRequestHandler<ByteBuf, ByteBuf> delegate;

    public RouterWithInterceptors() {
        SimpleRouter actualRouter = new SimpleRouter();
        HttpInterceptorSupport<ByteBuf, ByteBuf> interceptors =
                new HttpInterceptorSupport<ByteBuf, ByteBuf>();
        // Log all requests; authenticate only the "/hello" endpoint.
        interceptors.forUri("/*").intercept(new LoggingInterceptor());
        interceptors.forUri("/hello").intercept(new AuthInterceptor(new AuthenticationServiceImpl()));
        delegate = new HttpRequestHandler<ByteBuf, ByteBuf>(actualRouter, interceptors);
    }

    @Override
    public Observable<Void> handle(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
        return delegate.handle(request, response);
    }
}
| 3,195 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple/module/SimpleRunner.java | package netflix.karyon.examples.hellonoss.server.simple.module;
import netflix.adminresources.resources.KaryonWebAdminModule;
import netflix.karyon.Karyon;
import netflix.karyon.KaryonBootstrapModule;
import netflix.karyon.ShutdownModule;
import netflix.karyon.archaius.ArchaiusBootstrapModule;
import netflix.karyon.servo.KaryonServoModule;
/**
 * Stand-alone launcher for the "hello" example using Karyon's programmatic
 * (non-annotation) bootstrap API.
 *
 * @author Nitesh Kant
 */
public class SimpleRunner {

    public static void main(String[] args) {
        // Start an HTTP server on port 8888 serving the interceptor-wrapped
        // router, with Archaius configuration (cascade name "hello-netflix-oss"),
        // the WebAdmin console, a shutdown module and Servo metrics.
        // Per its name, startAndWaitTillShutdown() blocks until shutdown.
        Karyon.forRequestHandler(8888,
                // new SimpleRouter(), /* Use this instead of RouterWithInterceptors below if interceptors are not required */
                new RouterWithInterceptors(),
                new KaryonBootstrapModule(),
                new ArchaiusBootstrapModule("hello-netflix-oss"),
                // KaryonEurekaModule.asBootstrapModule(), /* Uncomment if you need eureka */
                Karyon.toBootstrapModule(KaryonWebAdminModule.class),
                ShutdownModule.asBootstrapModule(),
                KaryonServoModule.asBootstrapModule())
                .startAndWaitTillShutdown();
    }
}
| 3,196 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/simple/annotation/SimpleRoutingApp.java | package netflix.karyon.examples.hellonoss.server.simple.annotation;
import com.google.inject.Singleton;
import com.netflix.governator.annotations.Modules;
import io.netty.buffer.ByteBuf;
import netflix.adminresources.resources.KaryonWebAdminModule;
import netflix.karyon.KaryonBootstrap;
import netflix.karyon.ShutdownModule;
import netflix.karyon.archaius.ArchaiusBootstrap;
import netflix.karyon.examples.hellonoss.common.LoggingInterceptor;
import netflix.karyon.examples.hellonoss.common.auth.AuthInterceptor;
import netflix.karyon.examples.hellonoss.common.auth.AuthenticationService;
import netflix.karyon.examples.hellonoss.common.auth.AuthenticationServiceImpl;
import netflix.karyon.examples.hellonoss.common.health.HealthCheck;
import netflix.karyon.examples.hellonoss.server.simple.SimpleRouter;
import netflix.karyon.examples.hellonoss.server.simple.annotation.SimpleRoutingApp.KaryonRxRouterModuleImpl;
import netflix.karyon.servo.KaryonServoModule;
import netflix.karyon.transport.http.KaryonHttpModule;
/**
 * Annotation-driven variant of the simple routing example: Karyon discovers this
 * interface via its annotations and assembles the Governator/Guice modules listed
 * in {@code @Modules}.
 *
 * @author Tomasz Bak
 */
@ArchaiusBootstrap
@KaryonBootstrap(name = "hello-netflix-oss", healthcheck = HealthCheck.class)
@Singleton
@Modules(include = {
        ShutdownModule.class,
        KaryonServoModule.class,
        KaryonWebAdminModule.class,
        // KaryonEurekaModule.class, // Uncomment this to enable Eureka client.
        KaryonRxRouterModuleImpl.class
})
public interface SimpleRoutingApp {

    /** HTTP server module: binds the router, the auth service and the interceptors. */
    class KaryonRxRouterModuleImpl extends KaryonHttpModule<ByteBuf, ByteBuf> {

        public KaryonRxRouterModuleImpl() {
            // "httpServerA" names this server instance; request/response are raw ByteBuf.
            super("httpServerA", ByteBuf.class, ByteBuf.class);
        }

        @Override
        protected void configureServer() {
            bindRouter().toInstance(new SimpleRouter());
            bind(AuthenticationService.class).to(AuthenticationServiceImpl.class);
            // Log every URI; require authentication for "/hello" only.
            interceptorSupport().forUri("/*").intercept(LoggingInterceptor.class);
            interceptorSupport().forUri("/hello").interceptIn(AuthInterceptor.class);
            server().port(8888);
        }
    }
}
| 3,197 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/rxnetty/MyApplicationRunner.java | package netflix.karyon.examples.hellonoss.server.rxnetty;
import netflix.adminresources.resources.KaryonWebAdminModule;
import netflix.karyon.Karyon;
import netflix.karyon.KaryonBootstrapModule;
import netflix.karyon.ShutdownModule;
import netflix.karyon.archaius.ArchaiusBootstrapModule;
import netflix.karyon.examples.hellonoss.common.health.HealthCheck;
import netflix.karyon.servo.KaryonServoModule;
import netflix.karyon.transport.http.health.HealthCheckEndpoint;
/**
 * Launcher for the raw-RxNetty example: wires {@link RxNettyHandler} with a
 * health-check endpoint and the standard Karyon bootstrap modules.
 *
 * @author Nitesh Kant
 */
public class MyApplicationRunner {

    public static void main(String[] args) {
        // The same HealthCheck instance backs both the HTTP "/healthcheck"
        // endpoint and the Karyon bootstrap health check.
        HealthCheck healthCheckHandler = new HealthCheck();
        // Serve on port 8888; per its name, startAndWaitTillShutdown() blocks
        // the main thread until shutdown is triggered.
        Karyon.forRequestHandler(8888,
                new RxNettyHandler("/healthcheck",
                        new HealthCheckEndpoint(healthCheckHandler)),
                new KaryonBootstrapModule(healthCheckHandler),
                new ArchaiusBootstrapModule("hello-netflix-oss"),
                // KaryonEurekaModule.asBootstrapModule(), /* Uncomment if you need eureka */
                Karyon.toBootstrapModule(KaryonWebAdminModule.class),
                ShutdownModule.asBootstrapModule(),
                KaryonServoModule.asBootstrapModule()
        ).startAndWaitTillShutdown();
    }
}
| 3,198 |
0 | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server | Create_ds/karyon/karyon2-examples/src/main/java/netflix/karyon/examples/hellonoss/server/rxnetty/RxNettyHandler.java | package netflix.karyon.examples.hellonoss.server.rxnetty;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import io.reactivex.netty.protocol.http.server.RequestHandler;
import netflix.karyon.transport.http.health.HealthCheckEndpoint;
import rx.Observable;
/**
 * A hand-rolled RxNetty {@link RequestHandler} that routes by URI prefix:
 * the configured health-check URI goes to the {@link HealthCheckEndpoint},
 * {@code /hello/to/{username}} and {@code /hello} return JSON greetings, and
 * anything else is a 404.
 *
 * @author Nitesh Kant
 */
public class RxNettyHandler implements RequestHandler<ByteBuf, ByteBuf> {

    /** URI prefix that is delegated to the health-check endpoint. */
    private final String healthCheckUri;
    private final HealthCheckEndpoint healthCheckEndpoint;

    public RxNettyHandler(String healthCheckUri, HealthCheckEndpoint healthCheckEndpoint) {
        this.healthCheckUri = healthCheckUri;
        this.healthCheckEndpoint = healthCheckEndpoint;
    }

    @Override
    public Observable<Void> handle(HttpServerRequest<ByteBuf> request, HttpServerResponse<ByteBuf> response) {
        final String uri = request.getUri();
        if (uri.startsWith(healthCheckUri)) {
            return healthCheckEndpoint.handle(request, response);
        }
        if (uri.startsWith("/hello/to/")) {
            return greetNamedUser(request, response);
        }
        if (uri.startsWith("/hello")) {
            return response.writeStringAndFlush("{\"Message\":\"Hello newbee from Netflix OSS\"}");
        }
        response.setStatus(HttpResponseStatus.NOT_FOUND);
        return response.close();
    }

    /** Greets the user named after the "/hello/to/" prefix, or 400 when no name is given. */
    private static Observable<Void> greetNamedUser(HttpServerRequest<ByteBuf> request,
                                                   HttpServerResponse<ByteBuf> response) {
        // Strip the fixed "/hello/to" prefix; what remains is "/<username>", "/" or "".
        String remainder = request.getPath().substring("/hello/to".length());
        if (remainder.length() <= 1) { // empty or a lone "/": no username supplied
            response.setStatus(HttpResponseStatus.BAD_REQUEST);
            return response.writeStringAndFlush(
                    "{\"Error\":\"Please provide a username to say hello. The URI should be /hello/to/{username}\"}");
        }
        // NOTE(review): the username is concatenated into the JSON body without
        // escaping — acceptable for a demo, not for untrusted production input.
        String msg = "Hello " + remainder.substring(1) /*Remove the / prefix*/ + " from Netflix OSS";
        return response.writeStringAndFlush("{\"Message\":\"" + msg + "\"}");
    }
}
| 3,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.