index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/configuration/PropertiesConfigSource.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.configuration;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Map;
import java.util.Properties;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* Loads the 'Raigad.properties' file as a source.
*/
public class PropertiesConfigSource extends AbstractConfigSource
{
private static final Logger logger = LoggerFactory.getLogger(PropertiesConfigSource.class.getName());
private static final String DEFAULT_RAIGAD_PROPERTIES = "Raigad.properties";
private final Map<String, String> data = Maps.newConcurrentMap();
private final String raigadFile;
/**
 * Creates a source backed by the default 'Raigad.properties' classpath resource.
 * The file is not read until {@link #initialize(String, String)} is called.
 */
public PropertiesConfigSource()
{
this.raigadFile = DEFAULT_RAIGAD_PROPERTIES;
}
/**
 * Creates a source pre-populated from the given properties (non-null/non-empty
 * values only); the default properties file is still used by initialize().
 *
 * @param properties initial values to copy into this source; must not be null
 */
public PropertiesConfigSource(final Properties properties)
{
checkNotNull(properties);
this.raigadFile = DEFAULT_RAIGAD_PROPERTIES;
clone(properties);
}
/**
 * Test-only constructor that reads from an alternate classpath resource.
 *
 * @param file classpath resource name to load; must not be null
 */
@VisibleForTesting
PropertiesConfigSource(final String file)
{
this.raigadFile = checkNotNull(file);
}
/**
 * Loads the configured properties file from the classpath into this source.
 * Missing file is not an error (logged and ignored); a read failure is logged
 * with its cause. Fix over the original: the stream returned by
 * {@code url.openStream()} was never closed (resource leak), and an I/O error
 * was logged with the same "file not found" message as a missing file.
 */
@Override
public void initialize(final String asgName, final String region)
{
    super.initialize(asgName, region);
    URL url = PropertiesConfigSource.class.getClassLoader().getResource(raigadFile);
    if (url == null)
    {
        logger.info("No Raigad.properties. Ignore!");
        return;
    }
    // try-with-resources so the classpath stream is always closed
    try (InputStream stream = url.openStream())
    {
        Properties properties = new Properties();
        properties.load(stream);
        clone(properties);
    }
    catch (IOException e)
    {
        // The resource exists but could not be read; surface the cause.
        logger.warn("Failed to load " + raigadFile, e);
    }
}
@Override
public void initialize(IConfiguration config) {
// No-op: this source is initialized via initialize(asgName, region) instead.
}
/** Returns the value for {@code prop}, or null if it was never loaded/set. */
@Override
public String get(final String prop)
{
return data.get(prop);
}
/**
 * Stores an in-memory override; does not write back to the properties file.
 *
 * @throws NullPointerException if {@code value} is null
 */
@Override
public void set(final String key, final String value)
{
Preconditions.checkNotNull(value, "Value can not be null for configurations.");
data.put(key, value);
}
/** Number of key/value pairs currently held by this source. */
@Override
public int size()
{
return data.size();
}
/** True if a (non-null) value is present for {@code prop}. */
@Override
public boolean contains(final String prop)
{
return data.containsKey(prop);
}
/**
 * Copies all non-empty values from {@code properties} into this source's map.
 * Null or empty values are skipped. Synchronizes on the Properties object so a
 * concurrent modification of it cannot corrupt the iteration.
 *
 * @param properties the properties to copy from
 */
private void clone(final Properties properties)
{
if (properties.isEmpty()) return;
synchronized (properties)
{
for (final String key : properties.stringPropertyNames())
{
final String value = properties.getProperty(key);
if (!Strings.isNullOrEmpty(value))
{
data.put(key, value);
}
}
}
}
} | 5,500 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/configuration/CustomConfigSource.java | package com.netflix.raigad.configuration;
import java.util.List;
/*
Currently, a noop config source
*/
/**
 * No-op {@link IConfigSource}: it never stores anything, so every lookup falls
 * back to the caller-supplied default (or null where no default exists) and
 * {@link #set(String, String)} is silently ignored.
 */
public class CustomConfigSource implements IConfigSource {

    @Override
    public void initialize(String asgName, String region) {
        // No-op
    }

    @Override
    public void initialize(IConfiguration config) {
        // No-op
    }

    @Override
    public int size() {
        // This source never holds any entries.
        return 0;
    }

    @Override
    public boolean isEmpty() {
        // Fixed: the original returned false here, contradicting size() == 0
        // and contains(key) == false for every key.
        return true;
    }

    @Override
    public boolean contains(String key) {
        return false;
    }

    @Override
    public String get(String key) {
        return null;
    }

    @Override
    public String get(String key, String defaultValue) {
        // Fixed: the default-value overloads now honor the supplied default
        // instead of unconditionally returning null/false/0.
        return defaultValue;
    }

    @Override
    public boolean get(String key, boolean defaultValue) {
        return defaultValue;
    }

    @Override
    public Class<?> get(String key, Class<?> defaultValue) {
        return defaultValue;
    }

    @Override
    public <T extends Enum<T>> T get(String key, T defaultValue) {
        return defaultValue;
    }

    @Override
    public int get(String key, int defaultValue) {
        return defaultValue;
    }

    @Override
    public long get(String key, long defaultValue) {
        return defaultValue;
    }

    @Override
    public float get(String key, float defaultValue) {
        return defaultValue;
    }

    @Override
    public double get(String key, double defaultValue) {
        return defaultValue;
    }

    @Override
    public List<String> getList(String key) {
        return null;
    }

    @Override
    public List<String> getList(String key, List<String> defaultValue) {
        return defaultValue;
    }

    @Override
    public void set(String key, String value) {
        // No-op: writes are intentionally discarded.
    }
}
| 5,501 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/configuration/CompositeConfigSource.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.configuration;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import java.util.Collection;
/**
* A {@link IConfigSource} that delegates method calls to the underline sources. The order in which values are provided
* depend on the {@link IConfigSource}s provided. If user asks for key 'foo', and this composite has three sources, it
* will first check if the key is found in the first source, if not it will check the second and if not, the third, else
* return null or false if {@link #contains(String)} was called.
* <p/>
* Implementation note: get methods with a default are implemented in {@link AbstractConfigSource}, if the underlying
* source overrides one of these methods, then that implementation will be ignored.
*/
public class CompositeConfigSource extends AbstractConfigSource
{
private final ImmutableCollection<? extends IConfigSource> sources;
/**
 * Primary constructor. Source order matters: lookups consult the sources in
 * iteration order and return the first non-null value.
 *
 * @param sources the delegate sources; must contain at least one element
 */
public CompositeConfigSource(final ImmutableCollection<? extends IConfigSource> sources)
{
Preconditions.checkArgument(!sources.isEmpty(), "Can not create a composite config source without config sources!");
this.sources = sources;
}
/** Convenience overload; copies the collection into an immutable list. */
public CompositeConfigSource(final Collection<? extends IConfigSource> sources)
{
this(ImmutableList.copyOf(sources));
}
/** Convenience overload; copies the iterable into an immutable list. */
public CompositeConfigSource(final Iterable<? extends IConfigSource> sources)
{
this(ImmutableList.copyOf(sources));
}
/** Convenience varargs overload; copies the array into an immutable list. */
public CompositeConfigSource(final IConfigSource... sources)
{
this(ImmutableList.copyOf(sources));
}
/**
 * Initializes every delegate source in order. A failure in one source
 * currently aborts initialization of the remaining ones.
 */
@Override
public void initialize(final String asgName, final String region)
{
for (final IConfigSource source : sources)
{
//TODO should this catch any potential exceptions so one failing source does not block the rest?
source.initialize(asgName, region);
}
}
@Override
public void initialize(IConfiguration config) {
// No-op: this composite is initialized via initialize(asgName, region) instead.
}
/**
 * Total number of entries across every underlying source (keys present in
 * several sources are counted once per source).
 */
@Override
public int size()
{
    return sources.stream().mapToInt(IConfigSource::size).sum();
}

/** True when no underlying source holds any entry. */
@Override
public boolean isEmpty()
{
    return size() == 0;
}
/** True when some underlying source yields a non-null value for {@code key}. */
@Override
public boolean contains(final String key)
{
    return get(key) != null;
}

/**
 * Returns the first non-null value for {@code key} in source order, or null
 * when no source has it.
 *
 * @throws NullPointerException if {@code key} is null
 */
@Override
public String get(final String key)
{
    Preconditions.checkNotNull(key);
    return sources.stream()
            .map(source -> source.get(key))
            .filter(value -> value != null)
            .findFirst()
            .orElse(null);
}
/**
 * Writes the key/value pair to the FIRST underlying source only; later sources
 * are untouched, so the new value shadows any value they hold.
 *
 * @throws NullPointerException if {@code value} is null
 */
@Override
public void set(final String key, final String value)
{
Preconditions.checkNotNull(value, "Value can not be null for configurations.");
final IConfigSource firstSource = Iterables.getFirst(sources, null);
// firstSource shouldn't be null because the collection is immutable, and the collection is non empty.
Preconditions.checkState(firstSource != null, "There was no IConfigSource found at the first location?");
firstSource.set(key, value);
}
} | 5,502 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/configuration/SystemPropertiesConfigSource.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.configuration;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.commons.lang.StringUtils;
import java.util.Map;
import java.util.Properties;
/**
* Loads {@link System#getProperties()} as a source.
* <p/>
* Implementation note: {@link #set(String, String)} does not write to system properties, but will write to a new map.
* This means that setting values to this source has no effect on system properties or other instances of this class.
*/
public final class SystemPropertiesConfigSource extends AbstractConfigSource {
private final Map<String, String> data = Maps.newConcurrentMap();
/**
 * Snapshots the JVM system properties into this source, keeping only the
 * non-empty entries whose key starts with the Raigad web-app prefix.
 */
@Override
public void initialize(final String asgName, final String region) {
    super.initialize(asgName, region);
    final Properties properties = System.getProperties();
    for (final String name : properties.stringPropertyNames()) {
        if (name.startsWith(RaigadConfiguration.MY_WEBAPP_NAME)) {
            final String value = properties.getProperty(name);
            if (!StringUtils.isEmpty(value)) {
                data.put(name, value);
            }
        }
    }
}
@Override
public void initialize(IConfiguration config) {
// No-op: this source is initialized via initialize(asgName, region) instead.
}
/** Number of system-property entries captured at initialize() time (plus overrides). */
@Override
public int size() {
return data.size();
}
/** Returns the captured value for {@code key}, or null if absent. */
@Override
public String get(final String key) {
return data.get(key);
}
/**
 * Stores an override in this source's private map only; as documented on the
 * class, this never writes back to the real system properties.
 *
 * @throws NullPointerException if {@code value} is null
 */
@Override
public void set(final String key, final String value) {
Preconditions.checkNotNull(value, "Value can not be null for configurations");
data.put(key, value);
}
} | 5,503 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/InstanceManager.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.utils.RetriableCallable;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
/**
* This class provides the central place to create and consume the identity of the instance
*/
@Singleton
public class InstanceManager {
private static final Logger logger = LoggerFactory.getLogger(InstanceManager.class);
private static final String COMMA_SEPARATOR = ",";
private static final String PARAM_SEPARATOR = "=";
private final IRaigadInstanceFactory instanceFactory;
private final IMembership membership;
private final IConfiguration config;
private RaigadInstance thisInstance;
/**
 * Guice-injected constructor. NOTE: it eagerly calls init(), which talks to
 * the instance registry (deregisters dead nodes, registers this node) with
 * retries -- so construction can block and can throw.
 *
 * @throws Exception if registration/deregistration fails after retries
 */
@Inject
public InstanceManager(IRaigadInstanceFactory instanceFactory, IMembership membership, IConfiguration config) throws Exception {
this.instanceFactory = instanceFactory;
this.membership = membership;
this.config = config;
init();
}
/**
 * Two-phase startup, each phase wrapped in a RetriableCallable: first purge
 * registry entries for dead instances, then register this instance and keep
 * the resulting record in {@code thisInstance}.
 */
private void init() throws Exception {
logger.info("Deregistering dead instances");
new RetriableCallable<Void>() {
@Override
public Void retriableCall() throws Exception {
deregisterInstance(instanceFactory, config);
return null;
}
}.call();
logger.info("Registering this instance");
thisInstance = new RetriableCallable<RaigadInstance>() {
@Override
public RaigadInstance retriableCall() throws Exception {
RaigadInstance instance = registerInstance(instanceFactory, config);
return instance;
}
}.call();
logger.info("Raigad instance details: " + thisInstance.toString());
}
/**
 * Creates this node's registry entry. The record ID is "<DC>.<instanceId>";
 * the trailing null is the (unused here) volumes map parameter of create().
 */
private RaigadInstance registerInstance(IRaigadInstanceFactory instanceFactory, IConfiguration config) throws Exception {
return instanceFactory.create(
config.getAppName(),
config.getDC() + "." + config.getInstanceId(),
config.getInstanceId(), config.getHostname(),
config.getHostIP(), config.getRac(), config.getDC(), config.getASGName(), null);
}
/**
 * Deletes registry entries for instances the membership service no longer
 * reports as alive. Entries from other ASGs/AZs, multi-DC setups, and tribe
 * source clusters are skipped rather than deleted.
 *
 * @throws Exception if reading membership or deleting an entry fails
 */
private void deregisterInstance(IRaigadInstanceFactory instanceFactory, IConfiguration config) throws Exception {
    final List<RaigadInstance> allInstances = getInstanceList();

    // Collect the distinct ASG names. Set.add() is already a no-op for
    // duplicates, so the original contains()-before-add() check was redundant.
    HashSet<String> asgNames = new HashSet<>();
    for (RaigadInstance raigadInstance : allInstances) {
        asgNames.add(raigadInstance.getAsg());
    }

    logger.info("Known instances: {}", allInstances);
    logger.info("Known ASG's: {}", StringUtils.join(asgNames, ","));

    // Live instance IDs per ASG as reported by the membership service.
    Map<String, List<String>> instancesPerAsg = membership.getRacMembership(asgNames);
    logger.info("Known instances per ASG: {}", instancesPerAsg);

    for (RaigadInstance knownInstance : allInstances) {
        // Test same region and if it is alive.
        // TODO: Provide a config property to choose same DC/Region
        if (instancesPerAsg.containsKey(knownInstance.getAsg())) {
            if (!knownInstance.getAsg().equals(config.getASGName())) {
                logger.info("Skipping {} - different ASG", knownInstance.getInstanceId());
                continue;
            }

            if (!knownInstance.getAvailabilityZone().equals(config.getRac())) {
                logger.info("Skipping {} - different AZ", knownInstance.getInstanceId());
                continue;
            }

            if (instancesPerAsg.get(config.getASGName()).contains(knownInstance.getInstanceId())) {
                logger.info("Skipping {} - legitimate node", knownInstance.getInstanceId());
                continue;
            }

            logger.info("Found dead instance: " + knownInstance.getInstanceId());
            instanceFactory.delete(knownInstance);
        }
        else if (config.isMultiDC()) {
            logger.info("Multi DC setup, skipping unknown instances (" + knownInstance.getInstanceId() + ")");
        }
        else if (config.amISourceClusterForTribeNode()) {
            logger.info("Tribe setup, skipping unknown instances (" + knownInstance.getInstanceId() + ")");
        }
        else {
            logger.info("Found dead instance: " + knownInstance.getInstanceId());
            instanceFactory.delete(knownInstance);
        }
    }
}
/** Returns the registry record created for this node during init(). */
public RaigadInstance getInstance() {
return thisInstance;
}
/** Returns all registry records relevant to this node (see getInstanceList()). */
public List<RaigadInstance> getAllInstances() {
return getInstanceList();
}
/**
 * Builds the list of registry entries relevant to this node: for a tribe node,
 * instances of this cluster plus every configured source cluster; otherwise
 * just the instances registered under this cluster's application name.
 */
private List<RaigadInstance> getInstanceList() {
    List<RaigadInstance> instances = new ArrayList<RaigadInstance>();

    // Considering same cluster will not serve as a tribe node and source cluster for the tribe node
    if (config.amITribeNode()) {
        String clusterParams = config.getCommaSeparatedSourceClustersForTribeNode();
        assert (clusterParams != null) : "I am a tribe node but I need one or more source clusters";

        String[] clusters = StringUtils.split(clusterParams, COMMA_SEPARATOR);
        assert (clusters.length != 0) : "One or more clusters needed";

        List<String> sourceClusters = new ArrayList<>();
        // Adding current cluster
        sourceClusters.add(config.getAppName());

        // Each entry is expected to look like "<cluster>=<transport port>".
        for (String clusterEntry : clusters) {
            String[] clusterAndPort = clusterEntry.split(PARAM_SEPARATOR);
            // BUG FIX: the original asserted (clusterAndPort.length != 2), which
            // fails exactly when the entry IS well-formed and passes when it is
            // malformed -- the inverse of the intent stated in the message.
            // NOTE(review): asserts are no-ops unless the JVM runs with -ea;
            // consider explicit configuration validation instead.
            assert (clusterAndPort.length == 2) : "Cluster name or transport port is missing in configuration";
            sourceClusters.add(clusterAndPort[0]);
            logger.info("Adding cluster = <{}> ", clusterAndPort[0]);
        }

        for (String sourceClusterName : sourceClusters) {
            instances.addAll(instanceFactory.getAllIds(sourceClusterName));
        }

        logger.info("Printing tribe node related nodes...");
        for (RaigadInstance instance : instances) {
            logger.info(instance.toString());
        }
    }
    else {
        instances.addAll(instanceFactory.getAllIds(config.getAppName()));
    }

    if (config.isDebugEnabled()) {
        for (RaigadInstance instance : instances) {
            logger.debug(instance.toString());
        }
    }

    return instances;
}
/** Returns all registry records for the given cluster name. */
public List<RaigadInstance> getAllInstancesPerCluster(String clusterName) {
return getInstanceListPerCluster(clusterName);
}
/**
 * Fetches the registry records for {@code clusterName}, normalizing the name
 * (trimmed, lower-cased) before the lookup.
 */
private List<RaigadInstance> getInstanceListPerCluster(String clusterName) {
List<RaigadInstance> instances = new ArrayList<RaigadInstance>();
instances.addAll(instanceFactory.getAllIds(clusterName.trim().toLowerCase()));
if (config.isDebugEnabled()) {
for (RaigadInstance instance : instances) {
logger.debug(instance.toString());
}
}
return instances;
}
/**
 * Whether this node may act as a master. Every node is master-eligible unless
 * the deployment dedicates masters per ASG, in which case only nodes whose ASG
 * name contains "master" qualify.
 */
public boolean isMaster() {
    if (!config.isAsgBasedDedicatedDeployment()) {
        // Non-dedicated deployment: every node can be a master.
        return true;
    }
    return config.getASGName().toLowerCase().contains("master");
}
} | 5,504 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/RaigadInstance.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import java.io.Serializable;
/**
 * Serializable value object describing one Raigad-managed node: identity
 * (node ID, EC2 instance ID, cluster/app name), placement (AZ, DC, ASG,
 * hostname, public IP) and registry bookkeeping (update time, out-of-service
 * flag). NOTE(review): the field "Id" violates lowerCamelCase naming, but this
 * class is Serializable with a pinned serialVersionUID -- renaming fields would
 * change the default serialized form, so it is left as-is.
 */
public class RaigadInstance implements Serializable
{
private static final long serialVersionUID = 5606412386974488659L;
// DNS hostname of the node
private String hostname;
// last registry update timestamp (units set by callers of setUpdatetime)
private long updatetime;
// true when the node is flagged as out of service in the registry
private boolean outOfService;
// registry row identifier (elsewhere built as "<DC>.<instanceId>")
private String Id;
// application / cluster name
private String app;
// cloud provider instance ID (e.g. EC2 instance ID)
private String instanceId;
private String availabilityZone;
// public IP address, exposed via getHostIP()/setHostIP()
private String publicip;
// data center / region
private String dc;
private String asgName;
public String getId() {
return Id;
}
public void setId(String id) {
this.Id = id;
}
public String getApp() {
return app;
}
public void setApp(String app) {
this.app = app;
}
public String getInstanceId() {
return instanceId;
}
public void setInstanceId(String instanceId) {
this.instanceId = instanceId;
}
public String getAvailabilityZone() {
return availabilityZone;
}
public void setAvailabilityZone(String availabilityZone) {
this.availabilityZone = availabilityZone;
}
public String getHostName() {
return hostname;
}
public String getHostIP() {
return publicip;
}
public void setHostName(String hostname) {
this.hostname = hostname;
}
public void setHostIP(String publicip) {
this.publicip = publicip;
}
/** Human-readable dump of every field; used heavily in log statements. */
@Override
public String toString() {
return String
.format("Hostname: %s, InstanceId: %s, App: %s, AvailabilityZone : %s, Id : %s, PublicIp : %s, DC : %s, ASG : %s, UpdateTime : %s",
getHostName(), getInstanceId(), getApp(),
getAvailabilityZone(), getId(), getHostIP(), getDC(), getAsg(), getUpdatetime());
}
public String getDC() {
return dc;
}
public void setDC(String dc) {
this.dc = dc;
}
public String getAsg() {
return asgName;
}
public void setAsg(String asgName) {
this.asgName = asgName;
}
public long getUpdatetime() {
return updatetime;
}
public void setUpdatetime(long updatetime) {
this.updatetime = updatetime;
}
public boolean isOutOfService()
{
return outOfService;
}
public void setOutOfService(boolean outOfService)
{
this.outOfService = outOfService;
}
}
| 5,505 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/CassandraInstanceFactory.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* Factory to use Cassandra for managing instance data
*/
@Singleton
public class CassandraInstanceFactory implements IRaigadInstanceFactory {
private static final Logger logger = LoggerFactory.getLogger(CassandraInstanceFactory.class);
@Inject
IConfiguration config;
@Inject
InstanceDataDAOCassandra dao;
/**
 * Builds a RaigadInstance from the given attributes and persists it through
 * the Cassandra DAO. The {@code volumes} map is accepted but ignored here.
 * Any DAO failure is logged and rethrown wrapped in a RuntimeException.
 */
@Override
public RaigadInstance create(String app, String id, String instanceID,
String hostname, String ip, String zone, String dc, String asgName,
Map<String, Object> volumes) {
try {
logger.info("Creating entry for instance {} (node ID {}, hostname {}, IP {}) in {} ES cluster in {}, {}",
instanceID, id, hostname, ip, app, zone, dc);
RaigadInstance raigadInstance = new RaigadInstance();
raigadInstance.setAvailabilityZone(zone);
raigadInstance.setHostIP(ip);
raigadInstance.setHostName(hostname);
raigadInstance.setId(id);
raigadInstance.setInstanceId(instanceID);
raigadInstance.setDC(dc);
raigadInstance.setApp(app);
raigadInstance.setAsg(asgName);
dao.createInstanceEntry(raigadInstance);
return raigadInstance;
}
catch (Exception e) {
logger.error(e.getMessage());
throw new RuntimeException(e);
}
}
/** Returns every registered instance of the given application/cluster. */
@Override
public List<RaigadInstance> getAllIds(String appName) {
List<RaigadInstance> raigadInstances = new ArrayList<>(dao.getAllInstances(appName));
if (config.isDebugEnabled()) {
for (RaigadInstance instance : raigadInstances) {
logger.debug("Instance details: " + instance.getInstanceId());
}
}
return raigadInstances;
}
/** Looks up a single instance by app, DC and instance ID; null if absent. */
@Override
public RaigadInstance getInstance(String appName, String dc, String id) {
return dao.getInstance(appName, dc, id);
}
/**
 * Sorts instances in place by availability zone, then by node ID -- the same
 * ordering as the previous anonymous Comparator, expressed with the
 * Comparator.comparing(...).thenComparing(...) builders.
 */
@Override
public void sort(List<RaigadInstance> list) {
    list.sort(Comparator.comparing(RaigadInstance::getAvailabilityZone)
            .thenComparing(RaigadInstance::getId));
}
/**
 * Removes the instance's registry entry. DAO failures are logged and rethrown
 * as a RuntimeException so callers see the deregistration failure.
 */
@Override
public void delete(RaigadInstance instance) {
try {
dao.deleteInstanceEntry(instance);
}
catch (Exception e) {
logger.error(e.getMessage());
throw new RuntimeException("Unable to deregister Raigad instance", e);
}
}
/** Not implemented: updating an existing entry is currently a no-op. */
@Override
public void update(RaigadInstance arg0) {
// TODO Auto-generated method stub
}
/** Not implemented: volume attachment is currently a no-op. */
@Override
public void attachVolumes(RaigadInstance arg0, String arg1, String arg2) {
// TODO Auto-generated method stub
}
} | 5,506 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/HostSupplier.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import com.google.common.base.Supplier;
import com.google.inject.ImplementedBy;
import com.netflix.astyanax.connectionpool.Host;
import java.util.List;
@ImplementedBy(EurekaHostsSupplier.class)
public interface HostSupplier {
/**
 * Returns a supplier of the Cassandra hosts for the given cluster name
 * (by default resolved through Eureka; see EurekaHostsSupplier).
 */
public Supplier<List<Host>> getSupplier(String clusterName);
}
| 5,507 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/InstanceDataDAOCassandra.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import com.google.common.base.Supplier;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolType;
import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
import com.netflix.astyanax.model.*;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.thrift.ThriftFamilyFactory;
import com.netflix.astyanax.util.TimeUUIDUtils;
import com.netflix.raigad.configuration.IConfiguration;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* Use bootstrap cluster to find nodes
*/
@Singleton
public class InstanceDataDAOCassandra {
private static final Logger logger = LoggerFactory.getLogger(InstanceDataDAOCassandra.class);
private static final String CN_CLUSTER = "cluster";
private static final String CN_AZ = "availabilityZone";
private static final String CN_INSTANCEID = "instanceId";
private static final String CN_HOSTNAME = "hostname";
private static final String CN_IP = "ip";
private static final String CN_LOCATION = "location";
private static final String CN_ASGNAME = "asgname";
private static final String CN_UPDATETIME = "updatetime";
public static final String CF_NAME_INSTANCES = "instances";
public static final String CF_NAME_LOCKS = "locks";
private final Keyspace bootKeyspace;
private final IConfiguration config;
private final EurekaHostsSupplier eurekaHostsSupplier;
private final String BOOT_CLUSTER;
private final String KS_NAME;
private final int thriftPortForAstyanax;
private final AstyanaxContext<Keyspace> ctx;
public static final ColumnFamily<String, String> CF_INSTANCES =
new ColumnFamily<String, String>(CF_NAME_INSTANCES, StringSerializer.get(), StringSerializer.get());
public static final ColumnFamily<String, String> CF_LOCKS =
new ColumnFamily<String, String>(CF_NAME_LOCKS, StringSerializer.get(), StringSerializer.get());
/**
 * Validates the bootstrap-cluster configuration, builds the Astyanax context
 * (Eureka-based or external host supplier, per configuration), starts it and
 * keeps the resulting Keyspace client. Construction therefore opens the
 * connection pool eagerly.
 *
 * @throws ConnectionException if the Astyanax context cannot be started
 * @throws RuntimeException if a required config property is blank/invalid
 */
@Inject
public InstanceDataDAOCassandra(IConfiguration config, EurekaHostsSupplier eurekaHostsSupplier)
throws ConnectionException {
this.config = config;
BOOT_CLUSTER = config.getBootClusterName();
if (BOOT_CLUSTER == null || BOOT_CLUSTER.isEmpty()) {
throw new RuntimeException("Boot cluster can not be blank. Please use getBootClusterName() property");
}
KS_NAME = config.getCassandraKeyspaceName();
if (KS_NAME == null || KS_NAME.isEmpty()) {
throw new RuntimeException("Cassandra keyspace can not be blank. Please use getCassandraKeyspaceName() property");
}
thriftPortForAstyanax = config.getCassandraThriftPortForAstyanax();
if (thriftPortForAstyanax <= 0) {
throw new RuntimeException("Thrift port for Astyanax can not be blank. Please use getCassandraThriftPortForAstyanax() property");
}
this.eurekaHostsSupplier = eurekaHostsSupplier;
if (config.isEurekaHostSupplierEnabled()) {
ctx = initWithThriftDriverWithEurekaHostsSupplier();
}
else {
ctx = initWithThriftDriverWithExternalHostsSupplier();
}
ctx.start();
bootKeyspace = ctx.getClient();
}
/**
 * Persists a new row for {@code instance} in the instances column family.
 * If a row with the same app/DC/ID already exists the call is a no-op (logged,
 * not an error). Acquires the distributed lock before writing; getLock() is
 * expected to throw when the lock cannot be obtained -- confirm against its
 * full implementation (not fully visible here).
 *
 * @throws Exception if locking or the Cassandra mutation fails
 */
public void createInstanceEntry(RaigadInstance instance) throws Exception {
logger.info("Creating new instance entry");
String key = getRowKey(instance);
// If the key exists throw exception
if (getInstance(instance.getApp(), instance.getDC(), instance.getId()) != null) {
logger.info(String.format("Key already exists: %s", key));
return;
}
// Grab the lock
getLock(instance);
MutationBatch mutationBatch = bootKeyspace.prepareMutationBatch();
ColumnListMutation<String> columnListMutation = mutationBatch.withRow(CF_INSTANCES, key);
columnListMutation.putColumn(CN_CLUSTER, instance.getApp(), null);
columnListMutation.putColumn(CN_AZ, instance.getAvailabilityZone(), null);
columnListMutation.putColumn(CN_INSTANCEID, instance.getInstanceId(), null);
columnListMutation.putColumn(CN_HOSTNAME, instance.getHostName(), null);
columnListMutation.putColumn(CN_IP, instance.getHostIP(), null);
columnListMutation.putColumn(CN_LOCATION, instance.getDC(), null);
columnListMutation.putColumn(CN_ASGNAME, instance.getAsg(), null);
columnListMutation.putColumn(CN_UPDATETIME, TimeUUIDUtils.getUniqueTimeUUIDinMicros(), null);
mutationBatch.execute();
}
/**
 * Finds the registry entry matching both the instance ID and the region within
 * the given cluster, or returns null when no such entry exists.
 */
public RaigadInstance getInstance(String cluster, String region, String instanceId) {
    // Linear scan over the cluster's registry rows.
    for (RaigadInstance candidate : getAllInstances(cluster)) {
        boolean sameInstance = candidate.getInstanceId().equals(instanceId);
        if (sameInstance && candidate.getDC().equals(region)) {
            return candidate;
        }
    }
    return null;
}
/**
 * Reads all registry rows for {@code cluster} via CQL. In multi-DC (or tribe
 * multi-DC source) mode the query spans every location; otherwise it is
 * restricted to this node's DC. Any failure is logged and rethrown as a
 * RuntimeException.
 */
public List<RaigadInstance> getAllInstances(String cluster) {
List<RaigadInstance> list = new ArrayList<RaigadInstance>();
try {
String selectClause;
if (config.isMultiDC() || config.amISourceClusterForTribeNodeInMultiDC()) {
selectClause = String.format("SELECT * FROM %s WHERE %s = '%s' ",
CF_NAME_INSTANCES, CN_CLUSTER, cluster);
}
else {
selectClause = String.format("SELECT * FROM %s WHERE %s = '%s' AND %s = '%s' ",
CF_NAME_INSTANCES, CN_CLUSTER, cluster, CN_LOCATION, config.getDC());
}
if (config.isDebugEnabled()) {
logger.debug("Getting nodes for {}: {}", cluster, selectClause);
}
// Fresh ColumnFamily handle named after the keyspace, used only to issue CQL.
final ColumnFamily<String, String> CF_INSTANCES_NEW = ColumnFamily.newColumnFamily(
KS_NAME, StringSerializer.get(), StringSerializer.get());
OperationResult<CqlResult<String, String>> result =
bootKeyspace.prepareQuery(CF_INSTANCES_NEW).withCql(selectClause).execute();
// transform() maps each row's columns onto a RaigadInstance.
for (Row<String, String> row : result.getResult().getRows()) {
list.add(transform(row.getColumns()));
}
}
catch (Exception e) {
logger.warn("Caught unknown exception while reading: {}", e.getMessage());
throw new RuntimeException(e);
}
if (config.isDebugEnabled()) {
for (RaigadInstance instance : list) {
logger.debug("Read instance: {}", instance.toString());
}
}
return list;
}
/**
 * Deletes the registry row for a dead instance together with its lock rows.
 * Acquires the distributed lock first so a concurrent registration for the
 * same instance cannot interleave with the delete.
 *
 * @param instance the instance whose registry entry should be removed
 * @throws Exception if the lock cannot be acquired or a mutation fails
 */
public void deleteInstanceEntry(RaigadInstance instance) throws Exception {
    logger.info("Deleting dead instance entry");
    // Acquire the lock first
    getLock(instance);
    // Delete the row; a missing row means the entry is already gone
    String key = findKey(instance.getApp(), instance.getInstanceId(), instance.getDC());
    if (key == null) {
        return; // don't fail it
    }
    MutationBatch m = bootKeyspace.prepareMutationBatch();
    m.withRow(CF_INSTANCES, key).delete();
    m.execute();
    key = getLockingKey(instance);
    // Delete the lock key we acquired above
    m = bootKeyspace.prepareMutationBatch();
    m.withRow(CF_LOCKS, key).delete();
    m.execute();
    // Have to delete choosing key as well to avoid issues with delete
    // followed by immediate writes
    key = getChoosingKey(instance);
    m = bootKeyspace.prepareMutationBatch();
    m.withRow(CF_LOCKS, key).delete();
    m.execute();
}
/**
 * Sorts instances in place, first by availability zone and then by instance
 * ID, giving callers a stable, deterministic ordering.
 *
 * @param list the instances to sort (modified in place)
 */
protected void sort(List<RaigadInstance> list) {
    // Comparator.comparing replaces the original anonymous Comparator class
    list.sort(Comparator.comparing(RaigadInstance::getAvailabilityZone)
            .thenComparing(RaigadInstance::getId));
}
/*
 * To get a lock on the row: create a "choosing" row and make sure there are
 * no contenders; if there are, bail out (deleting our column on the way out).
 * Once there are no contenders, grab the lock if it is not already taken.
 */
/**
 * Acquires a short-lived distributed lock for the given instance using two
 * Cassandra rows: a "choosing" row used to detect contenders, and a "lock"
 * row that holds the actual lock.
 *
 * The choosing column expires after 6 seconds and the lock column after 600
 * seconds, so a crashed holder releases the lock automatically via TTL.
 *
 * @param instance the instance attempting to take the lock
 * @throws Exception if another contender is choosing, the lock is already
 *                   held by someone else, or the lock write cannot be confirmed
 */
private void getLock(RaigadInstance instance) throws Exception {
    String choosingkey = getChoosingKey(instance);
    MutationBatch m = bootKeyspace.prepareMutationBatch();
    ColumnListMutation<String> clm = m.withRow(CF_LOCKS, choosingkey);
    // Expire in 6 sec
    clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), new Integer(6));
    m.execute();
    // More than one column in the choosing row means another node is also choosing
    int count = bootKeyspace.prepareQuery(CF_LOCKS).getKey(choosingkey).getCount().execute().getResult();
    if (count > 1) {
        // Need to delete my entry
        m.withRow(CF_LOCKS, choosingkey).deleteColumn(instance.getInstanceId());
        m.execute();
        throw new Exception(String.format("More than 1 contender for lock %s %d", choosingkey, count));
    }
    // No contenders: check whether someone else already holds the lock row
    String lockKey = getLockingKey(instance);
    OperationResult<ColumnList<String>> result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
    if (result.getResult().size() > 0 && !result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
        throw new Exception(String.format("Lock already taken %s", lockKey));
    }
    // Write our lock column (600 s TTL), then re-read after a short pause to
    // confirm we are the sole holder
    clm = m.withRow(CF_LOCKS, lockKey);
    clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), new Integer(600));
    m.execute();
    Thread.sleep(100);
    result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
    if (result.getResult().size() == 1 && result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
        logger.info("Got lock " + lockKey);
        return;
    }
    else {
        throw new Exception(String.format("Cannot insert lock %s", lockKey));
    }
}
/**
 * Finds the registry row key matching the given cluster, instance ID and
 * datacenter.
 *
 * @param cluster    the cluster (application) name
 * @param instanceId the cloud instance ID
 * @param dc         the datacenter (region)
 * @return the row key, or {@code null} when no matching row exists
 * @throws RuntimeException wrapping any failure of the Cassandra query
 */
public String findKey(String cluster, String instanceId, String dc) {
    try {
        final String selectClause = String.format(
                "SELECT * FROM %s WHERE %s = '%s' and %s = '%s' and %s = '%s' ", CF_NAME_INSTANCES,
                CN_CLUSTER, cluster, CN_INSTANCEID, instanceId, CN_LOCATION, dc);
        logger.info(selectClause);
        final ColumnFamily<String, String> CF_INSTANCES_NEW = ColumnFamily.newColumnFamily(KS_NAME,
                StringSerializer.get(), StringSerializer.get());
        OperationResult<CqlResult<String, String>> result = bootKeyspace.prepareQuery(CF_INSTANCES_NEW)
                .withCql(selectClause).execute();
        if (result == null || result.getResult().getRows().size() == 0) {
            return null;
        }
        return result.getResult().getRows().getRowByIndex(0).getKey();
    }
    catch (Exception e) {
        // Parameterized logging with the full exception: the original concatenated
        // strings and logged only getMessage(), losing the stack trace
        logger.warn("Caught exception finding row for cluster [{}], id [{}], region [{}]",
                cluster, instanceId, dc, e);
        throw new RuntimeException(e);
    }
}
/**
 * Converts a Cassandra column list into a RaigadInstance. Each named column
 * is copied into the matching field; the write timestamp of the cluster
 * column becomes the instance update time.
 */
private RaigadInstance transform(ColumnList<String> columns) {
    Map<String, String> valuesByName = new HashMap<>();
    RaigadInstance raigadInstance = new RaigadInstance();

    for (Column<String> entry : columns) {
        valuesByName.put(entry.getName(), entry.getStringValue());
        if (CN_CLUSTER.equals(entry.getName())) {
            raigadInstance.setUpdatetime(entry.getTimestamp());
        }
    }

    String location = valuesByName.get(CN_LOCATION);
    String instanceId = valuesByName.get(CN_INSTANCEID);

    // Node ID is "<dc>.<instanceId>"
    raigadInstance.setId(location + "." + instanceId);
    raigadInstance.setInstanceId(instanceId);
    raigadInstance.setDC(location);
    raigadInstance.setApp(valuesByName.get(CN_CLUSTER));
    raigadInstance.setAvailabilityZone(valuesByName.get(CN_AZ));
    raigadInstance.setHostName(valuesByName.get(CN_HOSTNAME));
    raigadInstance.setHostIP(valuesByName.get(CN_IP));
    raigadInstance.setAsg(valuesByName.get(CN_ASGNAME));
    return raigadInstance;
}
/** Row key of the transient "choosing" row used during lock acquisition. */
private String getChoosingKey(RaigadInstance instance) {
    return getRowKey(instance) + "-choosing";
}
/** Row key of the lock row guarding this instance's registry entry. */
private String getLockingKey(RaigadInstance instance) {
    return getRowKey(instance) + "-lock";
}
/** Registry row key for an instance: {@code <app>_<dc>_<instanceId>}. */
private String getRowKey(RaigadInstance instance) {
    StringBuilder key = new StringBuilder(instance.getApp());
    key.append('_').append(instance.getDC());
    key.append('_').append(instance.getInstanceId());
    return key.toString();
}
/**
 * Builds an Astyanax context for the bootstrap Cassandra cluster, discovering
 * hosts through the Eureka-backed supplier (DISCOVERY_SERVICE mode).
 *
 * @return a Thrift-driver keyspace context; caller is responsible for starting it
 */
private AstyanaxContext<Keyspace> initWithThriftDriverWithEurekaHostsSupplier() {
    logger.info("Boot cluster (BOOT_CLUSTER) is {}, keyspace name (KS_NAME) is {}", BOOT_CLUSTER, KS_NAME);
    return new AstyanaxContext.Builder()
            .forCluster(BOOT_CLUSTER)
            .forKeyspace(KS_NAME)
            .withAstyanaxConfiguration(
                    new AstyanaxConfigurationImpl()
                            .setDiscoveryType(
                                    NodeDiscoveryType.DISCOVERY_SERVICE))
            .withConnectionPoolConfiguration(
                    new ConnectionPoolConfigurationImpl(
                            "MyConnectionPool")
                            // Small pool: only registry reads/writes go through this keyspace
                            .setMaxConnsPerHost(3)
                            .setPort(thriftPortForAstyanax))
            .withHostSupplier(eurekaHostsSupplier.getSupplier(BOOT_CLUSTER))
            .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
            .buildKeyspace(ThriftFamilyFactory.getInstance());
}
/**
 * Builds an Astyanax context for the bootstrap Cassandra cluster using an
 * explicit, configuration-provided host list (see {@code getSupplier()})
 * and round-robin connection pooling instead of Eureka discovery.
 *
 * @return a Thrift-driver keyspace context; caller is responsible for starting it
 */
private AstyanaxContext<Keyspace> initWithThriftDriverWithExternalHostsSupplier() {
    logger.info("Boot cluster (BOOT_CLUSTER) is {}, keyspace name (KS_NAME) is {}", BOOT_CLUSTER, KS_NAME);
    return new AstyanaxContext.Builder()
            .forCluster(BOOT_CLUSTER)
            .forKeyspace(KS_NAME)
            .withAstyanaxConfiguration(
                    new AstyanaxConfigurationImpl()
                            .setDiscoveryType(
                                    NodeDiscoveryType.DISCOVERY_SERVICE)
                            .setConnectionPoolType(
                                    ConnectionPoolType.ROUND_ROBIN))
            .withConnectionPoolConfiguration(
                    new ConnectionPoolConfigurationImpl(
                            "MyConnectionPool")
                            // Small pool: only registry reads/writes go through this keyspace
                            .setMaxConnsPerHost(3)
                            .setPort(thriftPortForAstyanax))
            .withHostSupplier(getSupplier())
            .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
            .buildKeyspace(ThriftFamilyFactory.getInstance());
}
/**
 * Supplies the Cassandra host list from the comma-separated
 * {@code getCommaSeparatedCassandraHostNames()} configuration property.
 *
 * @return a supplier that parses the configured host list on every call
 * @throws RuntimeException from {@code get()} when no host names are configured
 */
private Supplier<List<Host>> getSupplier() {
    return new Supplier<List<Host>>() {
        @Override
        public List<Host> get() {
            String[] configuredHosts = StringUtils.split(config.getCommaSeparatedCassandraHostNames(), ",");
            // Guard against both a null/blank property (split returns null) and an empty list;
            // the original would NPE on a null property instead of reporting the misconfiguration
            if (configuredHosts == null || configuredHosts.length == 0) {
                throw new RuntimeException("Cassandra host names can not be blank, at least one host is needed. " +
                        "Please use getCommaSeparatedCassandraHostNames() property.");
            }
            List<Host> hosts = new ArrayList<>();
            for (String cassHost : configuredHosts) {
                logger.info("Adding Cassandra host {}", cassHost);
                hosts.add(new Host(cassHost, thriftPortForAstyanax));
            }
            return hosts;
        }
    };
}
}
| 5,508 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/IMembership.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import com.google.inject.ImplementedBy;
import com.netflix.raigad.aws.AWSMembership;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
* Interface to manage membership meta information such as size of RAC, list of
* nodes in RAC etc. Also perform ACL updates used in multi-regional clusters
*/
@ImplementedBy(AWSMembership.class)
public interface IMembership {
    /**
     * Get a list of instances per RAC.
     *
     * @param autoScalingGroupNames the auto-scaling groups to inspect
     * @return map of RAC name to the list of instance IDs registered in it
     */
    Map<String, List<String>> getRacMembership(Collection<String> autoScalingGroupNames);
    /**
     * @return Size of current RAC
     */
    int getRacMembershipSize();
    /**
     * @return number of RACs
     */
    int getRacCount();
    /**
     * Add security group ACLs.
     *
     * @param listIPs CIDR ranges to allow
     * @param from    starting port of the allowed range
     * @param to      ending port of the allowed range
     */
    void addACL(Collection<String> listIPs, int from, int to);
    /**
     * Remove security group ACLs.
     *
     * @param listIPs CIDR ranges to revoke
     * @param from    starting port of the revoked range
     * @param to      ending port of the revoked range
     */
    void removeACL(Collection<String> listIPs, int from, int to);
    /**
     * List all ACLs currently granted for the given port range.
     *
     * @param from starting port of the range
     * @param to   ending port of the range
     * @return the CIDR ranges currently allowed
     */
    List<String> listACL(int from, int to);
    /**
     * Expand the membership size by the given count.
     *
     * @param count number of instances to add to the desired capacity
     */
    void expandRacMembership(int count);
    /**
     * Return from-to ports for given ACL.
     *
     * @param acl the CIDR range to look up
     * @return ACL to ports map (from-to), eg. 1.2.3.4 -> 5001, 5002
     */
    Map<String, List<Integer>> getACLPortMap(String acl);
}
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/IRaigadInstanceFactory.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import java.util.List;
import java.util.Map;
/**
* Interface for managing Elasticsearch instance data.
* Provides functionality to register, update, delete or list instances from the registry.
*/
public interface IRaigadInstanceFactory {
    /**
     * Return a list of all Elasticsearch server nodes registered.
     *
     * @param appName the cluster name
     * @return a list of all nodes in {@code appName}
     */
    List<RaigadInstance> getAllIds(String appName);
    /**
     * Return the Elasticsearch server node with the given {@code id}.
     *
     * @param appName the cluster name
     * @param dc      the datacenter (region) the node is registered in
     * @param id      the node id
     * @return the node with the given {@code id}, or {@code null} if none found
     */
    RaigadInstance getInstance(String appName, String dc, String id);
    /**
     * Create/Register an instance of the server with its info.
     *
     * @param app        cluster name
     * @param id         node id, unique within the cluster
     * @param instanceID cloud instance ID (e.g. EC2 instance ID)
     * @param hostname   host name of the node
     * @param ip         IP address of the node
     * @param rac        rack / availability zone
     * @param dc         datacenter (region)
     * @param asgname    auto-scaling group the node belongs to
     * @param volumes    attached volume metadata
     * @return the new node
     */
    RaigadInstance create(String app, String id, String instanceID,
            String hostname, String ip, String rac, String dc,
            String asgname, Map<String, Object> volumes);
    /**
     * Delete the server node from the registry.
     *
     * @param inst the node to delete
     */
    void delete(RaigadInstance inst);
    /**
     * Update the details of the server node in registry.
     *
     * @param inst the node to update
     */
    void update(RaigadInstance inst);
    /**
     * Sort the list by instance ID.
     *
     * @param return_ the list of nodes to sort (modified in place)
     */
    void sort(List<RaigadInstance> return_);
    /**
     * Attach volumes if required.
     *
     * @param instance  the node whose volumes should be attached
     * @param mountPath filesystem path to mount the volume at
     * @param device    device name to attach
     */
    void attachVolumes(RaigadInstance instance, String mountPath, String device);
}
| 5,510 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/identity/EurekaHostsSupplier.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.identity;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.shared.Application;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
 * Simple class that implements HostSupplier. It provides a Supplier<List<Host>>
 * using the {@link DiscoveryClient}, which is the Eureka client.
 *
 * Note that the class needs the Eureka application name to discover all instances of that application.
 */
@Singleton
public class EurekaHostsSupplier implements HostSupplier {
    private static final Logger LOG = LoggerFactory.getLogger(EurekaHostsSupplier.class);

    // Eureka client used to look up all registered instances of a cluster
    private final DiscoveryClient discoveryClient;

    @Inject
    public EurekaHostsSupplier(DiscoveryClient discoveryClient) {
        this.discoveryClient = discoveryClient;
    }

    /**
     * Returns a lazy supplier of the hosts currently registered UP in Eureka
     * under {@code clusterName}. The Eureka lookup happens on each call to
     * {@code get()}, not when the supplier is created.
     */
    @Override
    public Supplier<List<Host>> getSupplier(final String clusterName)
    {
        return new Supplier<List<Host>>() {
            @Override
            public List<Host> get() {
                if (discoveryClient == null) {
                    LOG.error("Discovery client cannot be null");
                    throw new RuntimeException("EurekaHostsSupplier needs a non-null DiscoveryClient");
                }
                LOG.debug("Raigad fetching instance list for app: " + clusterName);
                // Application names are looked up in upper case
                Application app = discoveryClient.getApplication(clusterName.toUpperCase());
                List<Host> hosts = new ArrayList<Host>();
                // A missing application or an empty instance list yields an empty host list, not an error
                if (app == null) {
                    LOG.warn("Cluster '{}' not found in eureka", clusterName);
                    return hosts;
                }
                List<InstanceInfo> ins = app.getInstances();
                if (ins == null || ins.isEmpty()) {
                    LOG.warn("Cluster '{}' found in eureka but has no instances", clusterName);
                    return hosts;
                }
                // Keep only instances reported UP, then map each InstanceInfo to an Astyanax Host
                hosts = Lists.newArrayList(Collections2.transform(
                        Collections2.filter(ins, new Predicate<InstanceInfo>() {
                            @Override
                            public boolean apply(InstanceInfo input) {
                                return input.getStatus() == InstanceInfo.InstanceStatus.UP;
                            }
                        }), new Function<InstanceInfo, Host>() {
                            @Override
                            public Host apply(InstanceInfo info) {
                                // Derives an alternate IP from the host name; assumes EC2-style names
                                // like "ec2-1-2-3-4.region..." whose first label splits into
                                // [prefix, octet1..octet4] -- TODO confirm for non-EC2 deployments,
                                // where parts[] may be shorter and this would throw
                                String[] parts = StringUtils.split(
                                        StringUtils.split(info.getHostName(), ".")[0], '-');
                                Host host = new Host(info.getHostName(), info.getPort())
                                        .addAlternateIpAddress(
                                                StringUtils.join(new String[] { parts[1], parts[2], parts[3],
                                                        parts[4] }, "."))
                                        .addAlternateIpAddress(info.getIPAddr())
                                        .setId(info.getId());
                                try {
                                    // Rack == AWS availability zone when the node runs on EC2
                                    if (info.getDataCenterInfo() instanceof AmazonInfo) {
                                        AmazonInfo amazonInfo = (AmazonInfo)info.getDataCenterInfo();
                                        host.setRack(amazonInfo.get(MetaDataKey.availabilityZone));
                                    }
                                }
                                catch (Throwable t) {
                                    LOG.error("Error getting rack for host " + host.getName(), t);
                                }
                                return host;
                            }
                        }));
                LOG.debug("Raigad found hosts from eureka - num hosts: " + hosts.size());
                return hosts;
            }
        };
    }
}
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/resources/ElasticsearchAdmin.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.resources;
import com.google.inject.Inject;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.defaultimpl.IElasticsearchProcess;
import com.netflix.raigad.indexmanagement.ElasticsearchIndexManager;
import com.netflix.raigad.utils.SystemUtils;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
@Path("/v1/esadmin")
@Produces(MediaType.APPLICATION_JSON)
public class ElasticsearchAdmin {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchAdmin.class);

    private static final String REST_SUCCESS = "[\"ok\"]";
    private static final String SHARD_REALLOCATION_PROPERTY = "cluster.routing.allocation.enable";

    private final IConfiguration config;
    private final IElasticsearchProcess esProcess;
    private final ElasticsearchIndexManager esIndexManager;

    @Inject
    public ElasticsearchAdmin(IConfiguration config, IElasticsearchProcess esProcess, ElasticsearchIndexManager esIndexManager) {
        this.config = config;
        this.esProcess = esProcess;
        this.esIndexManager = esIndexManager;
    }

    /** Starts the local Elasticsearch process. */
    @GET
    @Path("/start")
    public Response esStart() throws IOException {
        logger.info("Starting Elasticsearch now through a REST call...");
        esProcess.start();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Stops the local Elasticsearch process. */
    @GET
    @Path("/stop")
    public Response esStop() throws IOException {
        logger.info("Stopping Elasticsearch now through a REST call...");
        esProcess.stop();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Triggers a single index-management run. */
    @GET
    @Path("/run_indexmanager")
    public Response manageIndex() throws Exception {
        logger.info("Running index manager through a REST call...");
        esIndexManager.runIndexManagement();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }

    /** Lists the snapshot repositories registered with the local node. */
    @GET
    @Path("/existingRepositories")
    public Response esExistingRepositories() throws Exception {
        logger.info("Retrieving existing repositories through a REST call...");
        String url = "http://127.0.0.1:" + config.getHttpPort() + "/_snapshot/";
        String response = SystemUtils.runHttpGetCommand(url);
        JSONObject jsonObject = (JSONObject) new JSONParser().parse(response);
        return Response.ok(jsonObject, MediaType.APPLICATION_JSON).build();
    }

    /**
     * Sets the shard-allocation cluster setting to {@code value} in the given
     * scope. Extracted from the previously duplicated enable/disable handlers.
     *
     * @param type  settings scope: "transient" or "persistent" (case-insensitive)
     * @param value value for cluster.routing.allocation.enable ("all" or "none")
     * @return the raw Elasticsearch response body
     * @throws IOException if {@code type} is invalid or the HTTP call fails
     */
    private String setShardAllocation(String type, String value) throws IOException {
        if (!type.equalsIgnoreCase("transient") && !type.equalsIgnoreCase("persistent")) {
            throw new IOException("Parameter must be equal to transient or persistent");
        }
        String url = "http://127.0.0.1:" + config.getHttpPort() + "/_cluster/settings";
        JSONObject settings = new JSONObject();
        JSONObject property = new JSONObject();
        property.put(SHARD_REALLOCATION_PROPERTY, value);
        settings.put(type, property);
        return SystemUtils.runHttpPutCommand(url, settings.toJSONString());
    }

    /** Enables shard allocation; returns the raw Elasticsearch response. */
    @GET
    @Path("/shard_allocation_enable/{type}")
    public Response esShardAllocationEnable(@PathParam("type") String type) throws IOException {
        logger.info("Enabling shard allocation through a REST call...");
        String response = setShardAllocation(type, "all");
        return Response.ok(response, MediaType.APPLICATION_JSON).build();
    }

    /** Disables shard allocation; returns the standard success marker (original behavior kept). */
    @GET
    @Path("/shard_allocation_disable/{type}")
    public Response esShardAllocationDisable(@PathParam("type") String type) throws IOException {
        logger.info("Disabling shard allocation through a REST call...");
        setShardAllocation(type, "none");
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }
}
| 5,512 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/resources/NodeHealthCheck.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.resources;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
* Created by alfasi on 4/23/15.
*/
@Path("/v1/healthcheck")
@Produces(MediaType.APPLICATION_JSON)
public class NodeHealthCheck {
    private static final Logger logger = LoggerFactory.getLogger(NodeHealthCheck.class);

    private static final String REST_SUCCESS = "[\"ok\"]";

    /**
     * Reports whether the local Elasticsearch process is running:
     * 200 with {@code ["ok"]} when running, 500 otherwise.
     */
    @GET
    @Path("/isesprocessrunning")
    public Response checkHealth()
    {
        logger.info("Got REST call to check Node-health...");
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            // serverError() already produces HTTP 500; the original's extra .status(500) was redundant
            return Response.serverError().build();
        }
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }
}
| 5,513 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/resources/ElasticsearchConfig.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.resources;
import com.google.gson.JsonObject;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.raigad.configuration.IConfigSource;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.RaigadInstance;
import com.netflix.raigad.startup.RaigadServer;
import com.netflix.raigad.utils.ElasticsearchUtils;
import com.netflix.raigad.utils.TribeUtils;
import org.apache.commons.lang.StringUtils;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Stream;
/**
* This servlet will provide the configuration API service as and when Elasticsearch requests for it.
*/
@Path("/v1/esconfig")
public class ElasticsearchConfig {
private static final Logger logger = LoggerFactory.getLogger(ElasticsearchConfig.class);
private final RaigadServer raigadServer;
private final TribeUtils tribeUtils;
private final IConfigSource configSrc;
@Inject
public ElasticsearchConfig(RaigadServer raigadServer, TribeUtils tribeUtils, @Named("custom") IConfigSource configSrc, IConfiguration config) {
this.raigadServer = raigadServer;
this.tribeUtils = tribeUtils;
this.configSrc = configSrc;
this.configSrc.initialize(config);
}
@GET
@Path("/get_nodes")
@Produces(MediaType.TEXT_PLAIN)
public Response getNodes() {
try {
logger.info("Getting cluster nodes");
final List<RaigadInstance> instances = raigadServer.getInstanceManager().getAllInstances();
if (instances == null) {
logger.error("Error getting cluster nodes");
return Response.serverError().build();
}
logger.info("Got {} instances", instances.size());
JSONObject raigadJson = ElasticsearchUtils.transformRaigadInstanceToJson(instances);
return Response.ok(raigadJson.toString()).build();
} catch (Exception e) {
logger.error("Error getting nodes (getNodes)", e);
return Response.serverError().build();
}
}
@GET
@Path("/get_tribe_nodes/{id}")
@Produces(MediaType.TEXT_PLAIN)
public Response getTribeNodes(@PathParam("id") String id) {
try {
logger.info("Getting nodes for the source tribe cluster [{}]", id);
// Find source cluster name from the tribe ID by reading YAML file
String sourceTribeClusterName = tribeUtils.getTribeClusterNameFromId(id);
if (StringUtils.isEmpty(sourceTribeClusterName)) {
logger.error("Source tribe cluster name is null or empty, check configuration");
return Response.serverError().build();
}
logger.info("Found source tribe cluster {} with ID [{}]", sourceTribeClusterName, id);
final List<RaigadInstance> instances =
raigadServer.getInstanceManager().getAllInstancesPerCluster(sourceTribeClusterName);
if (instances == null) {
logger.error("Error getting source tribe cluster nodes for {}", sourceTribeClusterName);
return Response.serverError().build();
}
logger.info("Got {} instances for {}", instances.size(), sourceTribeClusterName);
JSONObject raigadJson = ElasticsearchUtils.transformRaigadInstanceToJson(instances);
return Response.ok(raigadJson.toString()).build();
} catch (Exception e) {
logger.error("Exception getting nodes (getTribeNodes)", e);
return Response.serverError().build();
}
}
@GET
@Path("/get_prop/{names}")
@Produces(MediaType.APPLICATION_JSON)
/*
A means to fetch Fast Properties via REST
@param names - comma separated list of property name
*/
public Response getProperty(@PathParam("names") String propNames) {
if (propNames.isEmpty())
return Response.status(Response.Status.NO_CONTENT).build();
JsonObject fastPropResults = new JsonObject();
final String[] pNamesSplit = propNames.split(",");
final Stream<String> pNamesStream = Arrays.stream(pNamesSplit);
pNamesStream.forEach(
(propName) -> {
try{
String s = this.configSrc.get(propName);
fastPropResults.addProperty(propName, s);
} catch (Exception e) {
Response.ok("Exception fetcing property " + propName + ", msg: " + e.getLocalizedMessage()).build();
}
}
);
return Response.ok(fastPropResults.toString()).build();
}
} | 5,514 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/resources/InjectedWebListener.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.resources;
import com.google.inject.AbstractModule;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.Scopes;
import com.google.inject.name.Names;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.raigad.aws.IAMCredential;
import com.netflix.raigad.aws.ICredential;
import com.netflix.raigad.backup.AbstractRepository;
import com.netflix.raigad.backup.AbstractRepositorySettingsParams;
import com.netflix.raigad.backup.S3Repository;
import com.netflix.raigad.backup.S3RepositorySettingsParams;
import com.netflix.raigad.configuration.CompositeConfigSource;
import com.netflix.raigad.configuration.CustomConfigSource;
import com.netflix.raigad.configuration.IConfigSource;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.CassandraInstanceFactory;
import com.netflix.raigad.identity.EurekaHostsSupplier;
import com.netflix.raigad.identity.HostSupplier;
import com.netflix.raigad.identity.IRaigadInstanceFactory;
import com.netflix.raigad.scheduler.GuiceJobFactory;
import com.netflix.raigad.startup.RaigadServer;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import org.quartz.SchedulerFactory;
import org.quartz.impl.StdSchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Servlet-context listener that bootstraps Raigad: builds the Governator/Guice
 * injector, initializes configuration and the Raigad server, and wires the
 * Jersey REST layer under /REST/*.
 */
public class InjectedWebListener extends GuiceServletContextListener {
    protected static final Logger logger = LoggerFactory.getLogger(InjectedWebListener.class);

    @Override
    protected Injector getInjector() {
        List<Module> moduleList = new ArrayList<>();
        moduleList.add(new JaxServletModule());
        moduleList.add(new RaigadGuiceModule());
        Injector injector;
        try {
            injector = LifecycleInjector.builder().withModules(moduleList).build().createInjector();
            // Initialize configuration and the Raigad server before starting the lifecycle manager
            startJobs(injector);
            LifecycleManager manager = injector.getInstance(LifecycleManager.class);
            manager.start();
        }
        catch (Exception e) {
            logger.error(e.getMessage(),e);
            throw new RuntimeException(e.getMessage(), e);
        }
        return injector;
    }

    // Initializes configuration first, then boots the Raigad server (schedulers, identity, etc.)
    private void startJobs(Injector injector) throws Exception {
        injector.getInstance(IConfiguration.class).initialize();
        logger.info("** Now starting to initialize Raigad server from OSS");
        injector.getInstance(RaigadServer.class).initialize();
    }

    // Routes /REST/* through Jersey, scanning com.netflix.raigad.resources for resource classes
    private static class JaxServletModule extends ServletModule {
        @Override
        protected void configureServlets() {
            Map<String, String> params = new HashMap<String, String>();
            params.put(PackagesResourceConfig.PROPERTY_PACKAGES, "unbound");
            params.put("com.sun.jersey.config.property.packages", "com.netflix.raigad.resources");
            params.put(ServletContainer.PROPERTY_FILTER_CONTEXT_PATH, "/REST");
            serve("/REST/*").with(GuiceContainer.class, params);
        }
    }

    // Central binding of Raigad's concrete implementations to their interfaces
    private static class RaigadGuiceModule extends AbstractModule {
        @Override
        protected void configure() {
            logger.info("** Binding OSS Config classes.");
            // Fix bug in Jersey-Guice integration exposed by child injectors
            binder().bind(GuiceContainer.class).asEagerSingleton();
            binder().bind(GuiceJobFactory.class).asEagerSingleton();
            binder().bind(IRaigadInstanceFactory.class).to(CassandraInstanceFactory.class);
            // TODO: Use config.getCredentialProvider() instead of IAMCredential
            binder().bind(ICredential.class).to(IAMCredential.class);
            binder().bind(AbstractRepository.class).annotatedWith(Names.named("s3")).to(S3Repository.class);
            binder().bind(AbstractRepositorySettingsParams.class).annotatedWith(Names.named("s3")).to(S3RepositorySettingsParams.class);
            bind(SchedulerFactory.class).to(StdSchedulerFactory.class).asEagerSingleton();
            bind(HostSupplier.class).to(EurekaHostsSupplier.class).in(Scopes.SINGLETON);
            binder().bind(IConfigSource.class).annotatedWith(Names.named("custom")).to(CompositeConfigSource.class);
        }
    }
}
| 5,515 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/resources/ElasticsearchBackup.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.resources;
import com.google.inject.Inject;
import com.netflix.raigad.backup.RestoreBackupManager;
import com.netflix.raigad.backup.SnapshotBackupManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * REST endpoints to trigger Elasticsearch snapshot backups and restores.
 */
@Path("/v1/esbackup")
@Produces(MediaType.APPLICATION_JSON)
public class ElasticsearchBackup {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchBackup.class);

    private static final String REST_SUCCESS = "[\"ok\"]";

    // Query-parameter names shared by the restore endpoints.
    private static final String REST_REPOSITORY_NAME = "repository_name";
    private static final String REST_REPOSITORY_TYPE = "repository_type";
    private static final String REST_SNAPSHOT_NAME = "snapshot";
    private static final String REST_INDICES_NAME = "indices";
    private static final String REST_RESTORE_RENAME_PATTERN = "rename_pattern";
    private static final String REST_RESTORE_RENAME_REPLACEMENT = "rename_replacement";

    private final SnapshotBackupManager snapshotBackupManager;
    private final RestoreBackupManager restoreBackupManager;

    @Inject
    public ElasticsearchBackup(SnapshotBackupManager snapshotBackupManager, RestoreBackupManager restoreBackupManager) {
        this.snapshotBackupManager = snapshotBackupManager;
        this.restoreBackupManager = restoreBackupManager;
    }

    /** Triggers an immediate snapshot backup. */
    @GET
    @Path("/do_snapshot")
    public Response snapshot() throws Exception {
        logger.info("Running snapshot through a REST call...");
        snapshotBackupManager.runSnapshotBackup();
        return okResponse();
    }

    /** Restores the named snapshot from the given repository. */
    @GET
    @Path("/do_restore")
    public Response restore(@QueryParam(REST_REPOSITORY_NAME) String repoName,
                            @QueryParam(REST_REPOSITORY_TYPE) String repoType,
                            @QueryParam(REST_SNAPSHOT_NAME) String snapName,
                            @QueryParam(REST_INDICES_NAME) String indicesName) throws Exception {
        logger.info("Running restore through a REST call...");
        restoreBackupManager.runRestore(repoName, repoType, snapName, indicesName, null, null);
        return okResponse();
    }

    /** Restores the named snapshot, renaming indices via pattern/replacement. */
    @GET
    @Path("/do_restore_with_rename")
    public Response restoreWithRename(@QueryParam(REST_REPOSITORY_NAME) String repoName,
                                      @QueryParam(REST_REPOSITORY_TYPE) String repoType,
                                      @QueryParam(REST_SNAPSHOT_NAME) String snapName,
                                      @QueryParam(REST_INDICES_NAME) String indicesName,
                                      @QueryParam(REST_RESTORE_RENAME_PATTERN) String renamePattern,
                                      @QueryParam(REST_RESTORE_RENAME_REPLACEMENT) String renameReplacement) throws Exception {
        logger.info("Running Restore with rename through REST call ...");
        restoreBackupManager.runRestore(repoName, repoType, snapName, indicesName, renamePattern, renameReplacement);
        return okResponse();
    }

    // Shared ["ok"] JSON success response.
    private Response okResponse() {
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    }
}
| 5,516 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/resources/SecurityGroupAdmin.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.resources;
import com.google.inject.Inject;
import com.netflix.raigad.identity.IMembership;
import org.apache.commons.validator.routines.InetAddressValidator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.Collections;
/**
* This http endpoint allows direct updates (adding/removing) (CIDR) IP addresses and port
* ranges to the security group for this app.
*/
/**
 * This http endpoint allows direct updates (adding/removing) (CIDR) IP addresses and port
 * ranges to the security group for this app.
 */
@Path("/v1/secgroup")
@Produces(MediaType.TEXT_PLAIN)
public class SecurityGroupAdmin
{
    private static final Logger log = LoggerFactory.getLogger(SecurityGroupAdmin.class);
    private static final Integer DEFAULT_MASK = 32;

    private final IMembership membership;

    @Inject
    public SecurityGroupAdmin(IMembership membership)
    {
        this.membership = membership;
    }

    /**
     * Adds a CIDR rule ({@code ip/mask}) for the given port range to the security group.
     *
     * @param ipAddress dotted-quad IP address (validated); 400 if invalid
     * @param mask CIDR mask bits; masks wider than /8 (or missing) are clamped to /32
     * @param fromPort start of the port range
     * @param toPort end of the port range
     * @return 200 on success, 400 on bad input, 500 on membership failure
     */
    @POST
    public Response addACL(
            @QueryParam("ip") String ipAddress,
            @QueryParam("mask") Integer mask,
            @QueryParam("fromPort") int fromPort,
            @QueryParam("toPort") int toPort)
    {
        if (!InetAddressValidator.getInstance().isValid(ipAddress)) {
            // BUG FIX: message had no {} placeholder, so the offending address was never logged
            log.error("Invalid IP address: {}", ipAddress);
            return Response.status(Response.Status.BAD_REQUEST).build();
        }

        if (mask == null || mask < 8) {
            log.info("IP mask is too wide or not provided, using /32");
            mask = DEFAULT_MASK;
        }

        try {
            membership.addACL(Collections.singletonList(String.format("%s/%d", ipAddress, mask)), fromPort, toPort);
        }
        catch (Exception e) {
            log.error("Error adding ACL to a security group", e);
            return Response.serverError().build();
        }

        return Response.ok().build();
    }

    /**
     * Removes a CIDR rule ({@code ip/mask}) for the given port range from the security group.
     *
     * NOTE(review): unlike addACL, this method does not clamp masks wider than /8 —
     * presumably intentional so existing wide rules can be removed; confirm.
     *
     * @param ipAddress dotted-quad IP address (validated); 400 if invalid
     * @param mask CIDR mask bits; defaults to /32 when missing
     * @param fromPort start of the port range
     * @param toPort end of the port range
     * @return 200 on success, 400 on bad input, 500 on membership failure
     */
    @DELETE
    public Response removeACL(
            @QueryParam("ip") String ipAddress,
            @QueryParam("mask") Integer mask,
            @QueryParam("fromPort") int fromPort,
            @QueryParam("toPort") int toPort)
    {
        if (!InetAddressValidator.getInstance().isValid(ipAddress)) {
            // BUG FIX: message had no {} placeholder, so the offending address was never logged
            log.error("Invalid IP address: {}", ipAddress);
            return Response.status(Response.Status.BAD_REQUEST).build();
        }

        if (mask == null) {
            log.info("IP mask not provided, using /32");
            mask = DEFAULT_MASK;
        }

        try {
            membership.removeACL(Collections.singletonList(String.format("%s/%d", ipAddress, mask)), fromPort, toPort);
        }
        catch (Exception e) {
            log.error("Error removing ACL from a security group", e);
            return Response.serverError().build();
        }

        return Response.ok().build();
    }
}
| 5,517 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/TaskTimer.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import org.quartz.Trigger;
import java.text.ParseException;
/**
* Interface to represent time/interval
*/
/**
 * Abstraction over a Quartz {@link Trigger}: implementations decide when and
 * how often a scheduled task should fire.
 */
public interface TaskTimer
{
    /**
     * @return the Quartz trigger describing this timer's schedule
     * @throws ParseException if the underlying schedule expression is invalid
     */
    Trigger getTrigger() throws ParseException;
}
| 5,518 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/CronTimer.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import org.apache.commons.lang.StringUtils;
import org.quartz.CronTrigger;
import org.quartz.Scheduler;
import org.quartz.Trigger;
import java.text.ParseException;
/**
* Runs jobs at the specified absolute time and frequency
*/
/**
 * {@link TaskTimer} backed by a Quartz cron expression, with convenience
 * constructors for hourly, daily and weekly schedules.
 */
public class CronTimer implements TaskTimer {
    private String cronExpression;
    private String triggerName;

    public enum DayOfWeek {
        SUN, MON, TUE, WED, THU, FRI, SAT
    }

    /** Fires hourly at the given minute and second. */
    public CronTimer(int minute, int sec) {
        cronExpression = sec + " " + minute + " * * * ?";
    }

    /** Fires hourly at the given minute and second, with an explicit trigger name. */
    public CronTimer(int minute, int sec, String triggerName) {
        this(minute, sec);
        this.triggerName = triggerName;
    }

    /** Fires daily at the given hour, minute and second. */
    public CronTimer(int hour, int minute, int sec) {
        cronExpression = sec + " " + minute + " " + hour + " * * ?";
    }

    /** Fires daily at the given hour, minute and second, with an explicit trigger name. */
    public CronTimer(int hour, int minute, int sec, String triggerName) {
        this(hour, minute, sec);
        this.triggerName = triggerName;
    }

    /** Fires weekly on the given day at the given hour, minute and second. */
    public CronTimer(DayOfWeek dayofweek, int hour, int minute, int sec) {
        cronExpression = sec + " " + minute + " " + hour + " * * " + dayofweek;
    }

    /** Uses the supplied cron expression verbatim. */
    public CronTimer(String expression) {
        this.cronExpression = expression;
    }

    public Trigger getTrigger() throws ParseException {
        // Suffix the default trigger name only when an explicit name was given.
        String name = StringUtils.isNotBlank(triggerName)
                ? "CronTrigger" + triggerName
                : "CronTrigger";
        return new CronTrigger(name, Scheduler.DEFAULT_GROUP, cronExpression);
    }
}
| 5,519 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/GuiceJobFactory.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import org.quartz.Job;
import org.quartz.JobDetail;
import org.quartz.SchedulerException;
import org.quartz.spi.JobFactory;
import org.quartz.spi.TriggerFiredBundle;
import com.google.inject.Inject;
import com.google.inject.Injector;
/**
 * Quartz {@link JobFactory} that creates job instances through the Guice
 * injector so jobs receive their dependencies.
 */
public class GuiceJobFactory implements JobFactory
{
    // Public because RaigadScheduler reaches through this field to run tasks directly.
    public final Injector guice;

    @Inject
    public GuiceJobFactory(Injector guice)
    {
        this.guice = guice;
    }

    /**
     * Instantiates the job class declared by the fired trigger via Guice.
     */
    @Override
    public Job newJob(TriggerFiredBundle bundle) throws SchedulerException
    {
        Class<?> taskClass = bundle.getJobDetail().getJobClass();
        Job task = (Job) guice.getInstance(taskClass);
        // NOTE(review): getInstance already injects members; this extra call is
        // presumably defensive and is kept to preserve existing behavior.
        guice.injectMembers(task);
        return task;
    }
}
| 5,520 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/BlockingSubmitThreadPoolExecutor.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
/**
* {@link ThreadPoolExecutor} that will block in the {@code submit()} method
* until the task can be successfully added to the queue.
*/
/**
 * {@link ThreadPoolExecutor} whose {@code submit()} blocks until the task can
 * be queued (up to a configurable timeout) instead of rejecting it outright.
 */
public class BlockingSubmitThreadPoolExecutor extends ThreadPoolExecutor
{
    private static final long DEFAULT_SLEEP = 100;
    private static final long DEFAULT_KEEP_ALIVE = 100;
    private static final Logger logger = LoggerFactory.getLogger(BlockingSubmitThreadPoolExecutor.class);

    // The work queue shared with the superclass; polled for remaining capacity.
    private final BlockingQueue<Runnable> queue;
    // Maximum total time (ms) to wait for queue capacity before giving up.
    private final long giveupTime;
    // Number of tasks submitted but not yet finished executing.
    private final AtomicInteger active;

    public BlockingSubmitThreadPoolExecutor(int maximumPoolSize, BlockingQueue<Runnable> workQueue, long timeoutAdding)
    {
        super(maximumPoolSize, maximumPoolSize, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, workQueue);
        this.queue = workQueue;
        this.giveupTime = timeoutAdding;
        this.active = new AtomicInteger(0);
    }

    /**
     * This is a thread safe way to avoid rejection exception... this is
     * implemented because we might want to hold the incoming requests till
     * there is a free thread.
     *
     * @throws RuntimeException if the queue stays full longer than the timeout,
     *         or if the wait is interrupted
     */
    @Override
    public <T> Future<T> submit(Callable<T> task)
    {
        synchronized (this)
        {
            active.incrementAndGet();
            boolean submitted = false;
            try
            {
                waitForQueueCapacity();
                Future<T> future = super.submit(task);
                submitted = true;
                return future;
            }
            finally
            {
                // BUG FIX: previously the counter leaked when the capacity wait
                // timed out (or submit was rejected), so sleepTillEmpty() would
                // see active > 0 forever and hang.
                if (!submitted)
                {
                    active.decrementAndGet();
                }
            }
        }
    }

    // Polls every DEFAULT_SLEEP ms until the queue has free capacity, giving up
    // with a RuntimeException after giveupTime ms.
    private void waitForQueueCapacity()
    {
        long waited = 0;
        while (queue.remainingCapacity() == 0)
        {
            if (waited > giveupTime)
            {
                throw new RuntimeException("Timed out because TPE is too busy...");
            }
            try
            {
                Thread.sleep(DEFAULT_SLEEP);
                waited += DEFAULT_SLEEP;
            }
            catch (InterruptedException e)
            {
                Thread.currentThread().interrupt(); // preserve interrupt status
                throw new RuntimeException(e);
            }
        }
    }

    @Override
    protected void afterExecute(Runnable r, Throwable t)
    {
        super.afterExecute(r, t);
        active.decrementAndGet();
    }

    /**
     * Blocking call that returns once the queue is drained and all active
     * tasks have finished, or throws after the configured timeout.
     */
    public void sleepTillEmpty()
    {
        long waited = 0;
        while (!queue.isEmpty() || (active.get() > 0))
        {
            if (waited > giveupTime)
            {
                throw new RuntimeException("Timed out because TPE is too busy...");
            }
            try
            {
                Thread.sleep(DEFAULT_SLEEP);
                waited += DEFAULT_SLEEP;
                logger.debug("After Sleeping for empty: {}, Count: {}", queue.size(), active.get());
            }
            catch (InterruptedException e)
            {
                Thread.currentThread().interrupt(); // preserve interrupt status
                throw new RuntimeException(e);
            }
        }
    }
}
| 5,521 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/ExecutionException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
/**
 * Checked exception raised when a scheduled Raigad task fails to execute.
 * Thin wrapper around {@link Exception} offering the usual constructor set.
 */
public class ExecutionException extends Exception {
    private static final long serialVersionUID = 1L;

    /** Failure with a descriptive message and an underlying cause. */
    public ExecutionException(String msg, Throwable th) {
        super(msg, th);
    }

    /** Failure with a descriptive message only. */
    public ExecutionException(String msg) {
        super(msg);
    }

    /** Failure wrapping another exception as the cause. */
    public ExecutionException(Exception ex) {
        super(ex);
    }

    /** Failure wrapping any throwable as the cause. */
    public ExecutionException(Throwable th) {
        super(th);
    }
}
| 5,522 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/NamedThreadPoolExecutor.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
 * Fixed-size {@link ThreadPoolExecutor} whose daemon worker threads carry a
 * readable pool name, and whose rejection handler blocks (retrying the queue)
 * instead of dropping tasks.
 */
public class NamedThreadPoolExecutor extends ThreadPoolExecutor
{
    public NamedThreadPoolExecutor(int poolSize, String poolName)
    {
        this(poolSize, poolName, new LinkedBlockingQueue<Runnable>());
    }

    public NamedThreadPoolExecutor(int poolSize, String poolName, BlockingQueue<Runnable> queue)
    {
        super(poolSize, poolSize, 1000, TimeUnit.MILLISECONDS, queue,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat(poolName + "-%d").build(),
                new LocalRejectedExecutionHandler(queue));
    }

    /**
     * Keeps re-offering the rejected task to the queue (1s at a time) until it
     * fits or the executor shuts down.
     */
    private static class LocalRejectedExecutionHandler implements RejectedExecutionHandler
    {
        private final BlockingQueue<Runnable> queue;

        LocalRejectedExecutionHandler(BlockingQueue<Runnable> queue)
        {
            this.queue = queue;
        }

        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor)
        {
            // BUG FIX: interruption was previously swallowed outright, losing the
            // thread's interrupt status. We still keep retrying (the original
            // contract), but remember the interrupt and restore it on exit.
            boolean interrupted = false;
            try
            {
                while (true)
                {
                    if (executor.isShutdown())
                        throw new RejectedExecutionException("ThreadPoolExecutor has shut down");
                    try
                    {
                        if (queue.offer(task, 1000, TimeUnit.MILLISECONDS))
                            break;
                    }
                    catch (InterruptedException e)
                    {
                        interrupted = true;
                    }
                }
            }
            finally
            {
                if (interrupted)
                    Thread.currentThread().interrupt();
            }
        }
    }
}
| 5,523 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/RaigadScheduler.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.utils.Sleeper;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.ParseException;
/**
* Scheduling class to schedule Raigad tasks. Uses Quartz scheduler
*/
/**
 * Scheduling class to schedule Raigad tasks. Uses the Quartz scheduler,
 * creating jobs through {@link GuiceJobFactory} so they are injected.
 */
@Singleton
public class RaigadScheduler {
    private static final Logger logger = LoggerFactory.getLogger(RaigadScheduler.class);

    private final Scheduler scheduler;
    private final GuiceJobFactory jobFactory;
    private final Sleeper sleeper;

    @Inject
    public RaigadScheduler(SchedulerFactory factory, GuiceJobFactory jobFactory, Sleeper sleeper) {
        try {
            this.scheduler = factory.getScheduler();
            this.scheduler.setJobFactory(jobFactory);
            this.jobFactory = jobFactory;
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
        this.sleeper = sleeper;
    }

    /**
     * Schedules a task under the given name using the supplied timer.
     */
    public void addTask(String name, Class<? extends Task> taskclass, TaskTimer timer) throws SchedulerException, ParseException {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        JobDetail jobDetail = new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass);
        scheduler.scheduleJob(jobDetail, timer.getTrigger());
    }

    /**
     * Schedules a task after waiting the given number of seconds; the wait
     * happens on a background thread so the caller is not blocked.
     */
    public void addTaskWithDelay(final String name, Class<? extends Task> taskclass, final TaskTimer timer, final int delayInSeconds) throws SchedulerException, ParseException {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        final JobDetail jobDetail = new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass);

        new Thread(new Runnable() {
            public void run() {
                try {
                    sleeper.sleepQuietly(delayInSeconds * 1000L);
                    scheduler.scheduleJob(jobDetail, timer.getTrigger());
                } catch (SchedulerException e) {
                    logger.warn("problem occurred while scheduling a job with name " + name, e);
                } catch (ParseException e) {
                    logger.warn("problem occurred while parsing a job with name " + name, e);
                }
            }
        }).start();
    }

    /**
     * Runs the given task once, immediately, bypassing Quartz entirely.
     */
    public void runTaskNow(Class<? extends Task> taskclass) throws Exception {
        jobFactory.guice.getInstance(taskclass).execute(null);
    }

    /** Removes the named task from the scheduler. */
    public void deleteTask(String name) throws SchedulerException, ParseException {
        scheduler.deleteJob(name, Scheduler.DEFAULT_GROUP);
    }

    public final Scheduler getScheduler() {
        return scheduler;
    }

    /** Shuts the underlying Quartz scheduler down. */
    public void shutdown() {
        try {
            scheduler.shutdown();
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
    }

    /** Starts the underlying Quartz scheduler. */
    public void start() {
        try {
            scheduler.start();
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
    }
}
| 5,524 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/Task.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import com.google.common.base.Throwables;
import com.netflix.raigad.configuration.IConfiguration;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Task class that should be implemented by all cron tasks. Jobconf will contain
* any instance specific data
*
* NOTE: Constructor must not throw any exception. This will cause Quartz to set the job to failure
*/
/**
 * Task class that should be implemented by all cron tasks. Jobconf will contain
 * any instance specific data.
 *
 * Registers itself as a JMX MBean (see {@link TaskMBean}) exposing execution
 * and error counters.
 *
 * NOTE: Constructor must not throw any exception. This will cause Quartz to set the job to failure
 */
public abstract class Task implements Job, TaskMBean
{
    // NOTE(review): plain (non-volatile) field read/written from Quartz worker
    // threads — the RUNNING guard below is best-effort, not a strict mutex.
    public STATE status = STATE.DONE;

    public static enum STATE
    {
        ERROR, RUNNING, DONE
    }

    protected final IConfiguration config;

    private static final Logger logger = LoggerFactory.getLogger(Task.class);
    private final AtomicInteger errors = new AtomicInteger();
    private final AtomicInteger executions = new AtomicInteger();

    protected Task(IConfiguration config)
    {
        this(config, ManagementFactory.getPlatformMBeanServer());
    }

    protected Task(IConfiguration config, MBeanServer mBeanServer) {
        this.config = config;
        // TODO: don't do mbean registration here
        String mbeanName = "com.netflix.raigad.scheduler:type=" + this.getClass().getName();
        try
        {
            mBeanServer.registerMBean(this, new ObjectName(mbeanName));
            initialize();
        }
        catch (Exception e)
        {
            throw Throwables.propagate(e);
        }
    }

    /**
     * This method has to be implemented and cannot thow any exception.
     */
    public void initialize() throws ExecutionException
    {
        // nothing to intialize
    }

    /** The actual work of the task; implemented by subclasses. */
    public abstract void execute() throws Exception;

    /**
     * Main method to execute a task. Skips execution when a previous run is
     * still marked RUNNING; records errors but never lets them propagate to Quartz.
     */
    public void execute(JobExecutionContext context) throws JobExecutionException
    {
        executions.incrementAndGet();
        try
        {
            if (status == STATE.RUNNING)
                return;
            status = STATE.RUNNING;
            execute();
        }
        // Consolidated: the previous catch (Exception) and catch (Throwable)
        // blocks had identical bodies; one catch (Throwable) covers both.
        catch (Throwable e)
        {
            status = STATE.ERROR;
            logger.error("Couldnt execute the task because of " + e.getMessage(), e);
            errors.incrementAndGet();
        }
        if (status != STATE.ERROR)
            status = STATE.DONE;
    }

    public STATE state()
    {
        return status;
    }

    public int getErrorCount()
    {
        return errors.get();
    }

    public int getExecutionCount()
    {
        return executions.get();
    }

    public abstract String getName();
}
| 5,525 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/TaskMBean.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
/**
* MBean to monitor Task executions.
*
*/
/**
 * JMX MBean interface exposing per-task execution metrics: how many times a
 * task ran, how many runs failed, and the task's display name.
 */
public interface TaskMBean
{
    /** Number of executions that ended in an error. */
    int getErrorCount();

    /** Total number of executions so far. */
    int getExecutionCount();

    /** Human-readable task name. */
    String getName();
}
| 5,526 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/SimpleTimer.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.scheduler;
import org.quartz.Scheduler;
import org.quartz.SimpleTrigger;
import org.quartz.Trigger;
import java.text.ParseException;
import java.util.Date;
/**
* SimpleTimer allows jobs to run starting from specified time occurring at
* regular frequency's. Frequency of the execution timestamp since epoch.
*/
/**
 * {@link TaskTimer} backed by a Quartz {@link SimpleTrigger}: either repeats
 * at a fixed interval, fires once at an absolute time, or fires immediately.
 */
public class SimpleTimer implements TaskTimer {
    private final SimpleTrigger trigger;

    /** Repeats indefinitely every {@code interval} milliseconds. */
    public SimpleTimer(String name, long interval) {
        trigger = new SimpleTrigger(name, SimpleTrigger.REPEAT_INDEFINITELY, interval);
    }

    /** Fires exactly once at the given epoch-millisecond timestamp. */
    public SimpleTimer(String name, String group, long startTime) {
        trigger = new SimpleTrigger(name, group, new Date(startTime));
    }

    /** Fires immediately, one time only. */
    public SimpleTimer(String name) {
        trigger = new SimpleTrigger(name, Scheduler.DEFAULT_GROUP);
    }

    public Trigger getTrigger() throws ParseException {
        // Misfires (e.g. scheduler downtime) fire as soon as possible.
        trigger.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW);
        return trigger;
    }
}
| 5,527 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchTransportClientConnectionException.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import java.io.IOException;
/**
 * {@link IOException} subtype signalling that the Elasticsearch transport
 * client could not establish or keep a connection.
 */
public class ElasticsearchTransportClientConnectionException extends IOException {
    private static final long serialVersionUID = 444L;

    /** Connection failure with a descriptive message. */
    public ElasticsearchTransportClientConnectionException(String message) {
        super(message);
    }

    /** Connection failure with a message and the underlying cause. */
    public ElasticsearchTransportClientConnectionException(String message, Exception e) {
        super(message, e);
    }
}
| 5,528 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/TuneElasticsearch.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import java.io.IOException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
/**
 * One-shot scheduled task that writes all tuned Elasticsearch properties to
 * the node's YAML configuration via the configured {@link IElasticsearchTuner}.
 */
@Singleton
public class TuneElasticsearch extends Task
{
    public static final String JOBNAME = "Tune-Elasticsearch";

    private final IElasticsearchTuner tuner;

    @Inject
    public TuneElasticsearch(IConfiguration config, IElasticsearchTuner tuner)
    {
        super(config);
        this.tuner = tuner;
    }

    /** Writes all tuned properties into the YAML file from config. */
    public void execute() throws IOException
    {
        tuner.writeAllProperties(config.getYamlLocation(), null);
    }

    @Override
    public String getName()
    {
        // Consistency fix: return the JOBNAME constant instead of duplicating the literal.
        return JOBNAME;
    }

    public static TaskTimer getTimer()
    {
        return new SimpleTimer(JOBNAME);
    }
}
| 5,529 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/SystemUtils.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.google.common.base.Charsets;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.List;
public class SystemUtils {
public static final String NOT_FOUND_STR = "NOT_FOUND";
private static final Logger logger = LoggerFactory.getLogger(SystemUtils.class);
/**
 * Fetches the body of the given URL with an HTTP GET (1s connect/read timeouts).
 *
 * @param url the URL to fetch
 * @return the UTF-8 response body, or {@link #NOT_FOUND_STR} on HTTP 404
 * @throws RuntimeException on any other non-200 status or I/O failure
 */
public static String getDataFromUrl(String url) {
    HttpURLConnection connection = null;
    try {
        connection = (HttpURLConnection) new URL(url).openConnection();
        connection.setConnectTimeout(1000);
        connection.setReadTimeout(1000);
        connection.setRequestMethod("GET");

        if (connection.getResponseCode() == 404) {
            return NOT_FOUND_STR;
        }
        if (connection.getResponseCode() != 200) {
            throw new RuntimeException("Unable to get data from " + url);
        }

        // Read the whole response body. getInputStream() replaces the previous
        // getContent() call, which required a fragile cast to FilterInputStream.
        byte[] buffer = new byte[2048];
        ByteArrayOutputStream body = new ByteArrayOutputStream();
        InputStream in = connection.getInputStream();
        int bytesRead; // previously (misleadingly) named "character"
        while ((bytesRead = in.read(buffer, 0, buffer.length)) != -1) {
            body.write(buffer, 0, bytesRead);
        }

        String requestResult = new String(body.toByteArray(), Charsets.UTF_8);
        logger.info("Calling URL API: {}, response: {}", url, requestResult);
        return requestResult;
    }
    catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    finally {
        if (connection != null) {
            connection.disconnect();
        }
    }
}
public static String runHttpGetCommand(String url) throws Exception {
DefaultHttpClient client = new DefaultHttpClient();
InputStream isStream = null;
try {
HttpParams httpParameters = new BasicHttpParams();
int timeoutConnection = 1000;
int timeoutSocket = 1000;
HttpConnectionParams.setConnectionTimeout(httpParameters, timeoutConnection);
HttpConnectionParams.setSoTimeout(httpParameters, timeoutSocket);
client.setParams(httpParameters);
HttpGet getRequest = new HttpGet(url);
getRequest.setHeader("Content-type", "application/json");
HttpResponse resp = client.execute(getRequest);
if (resp == null || resp.getEntity() == null) {
throw new ElasticsearchHttpException("Unable to execute GET URL (" + url + "), exception Message: < Null Response or Null HttpEntity >");
}
isStream = resp.getEntity().getContent();
if (resp.getStatusLine().getStatusCode() != 200) {
throw new ElasticsearchHttpException("Unable to execute GET URL (" + url + "), exception Message: (" + IOUtils.toString(isStream, StandardCharsets.UTF_8.toString()) + ")");
}
String requestResult = IOUtils.toString(isStream, StandardCharsets.UTF_8.toString());
logger.debug("GET URL API: {} returns: {}", url, requestResult);
return requestResult;
}
catch (Exception e) {
throw new ElasticsearchHttpException("Caught an exception during execution of URL (" + url + "), exception Message: (" + e + ")");
}
finally {
if (isStream != null) {
isStream.close();
}
}
}
public static String runHttpPutCommand(String url, String jsonBody) throws IOException {
String return_;
DefaultHttpClient client = new DefaultHttpClient();
InputStream isStream = null;
try {
HttpParams httpParameters = new BasicHttpParams();
int timeoutConnection = 1000;
int timeoutSocket = 1000;
HttpConnectionParams.setConnectionTimeout(httpParameters, timeoutConnection);
HttpConnectionParams.setSoTimeout(httpParameters, timeoutSocket);
client.setParams(httpParameters);
HttpPut putRequest = new HttpPut(url);
putRequest.setEntity(new StringEntity(jsonBody, StandardCharsets.UTF_8));
putRequest.setHeader("Content-type", "application/json");
HttpResponse resp = client.execute(putRequest);
if (resp == null || resp.getEntity() == null) {
throw new ElasticsearchHttpException("Unable to execute PUT URL (" + url + "), exception message: < Null Response or Null HttpEntity >");
}
isStream = resp.getEntity().getContent();
if (resp.getStatusLine().getStatusCode() != 200) {
throw new ElasticsearchHttpException("Unable to execute PUT URL (" + url + "), exception message: (" + IOUtils.toString(isStream, StandardCharsets.UTF_8.toString()) + ")");
}
String requestResult = IOUtils.toString(isStream, StandardCharsets.UTF_8.toString());
logger.debug("PUT URL API: {} with JSONBody {} returns: {}", url, jsonBody, requestResult);
return requestResult;
}
catch (Exception e) {
throw new ElasticsearchHttpException("Caught an exception during execution of URL (" + url + "), exception message: (" + e + ")");
}
finally {
if (isStream != null) {
isStream.close();
}
}
}
public static String runHttpPostCommand(String url, String jsonBody) throws IOException {
String return_;
DefaultHttpClient client = new DefaultHttpClient();
InputStream isStream = null;
try {
HttpParams httpParameters = new BasicHttpParams();
int timeoutConnection = 1000;
int timeoutSocket = 1000;
HttpConnectionParams.setConnectionTimeout(httpParameters, timeoutConnection);
HttpConnectionParams.setSoTimeout(httpParameters, timeoutSocket);
client.setParams(httpParameters);
HttpPost postRequest = new HttpPost(url);
if (StringUtils.isNotEmpty(jsonBody))
postRequest.setEntity(new StringEntity(jsonBody, StandardCharsets.UTF_8));
postRequest.setHeader("Content-type", "application/json");
HttpResponse resp = client.execute(postRequest);
if (resp == null || resp.getEntity() == null) {
throw new ElasticsearchHttpException("Unable to execute POST URL (" + url + ") Exception Message: < Null Response or Null HttpEntity >");
}
isStream = resp.getEntity().getContent();
if (resp.getStatusLine().getStatusCode() != 200) {
throw new ElasticsearchHttpException("Unable to execute POST URL (" + url + ") Exception Message: (" + IOUtils.toString(isStream, StandardCharsets.UTF_8.toString()) + ")");
}
return_ = IOUtils.toString(isStream, StandardCharsets.UTF_8.toString());
logger.debug("POST URL API: {} with JSONBody {} returns: {}", url, jsonBody, return_);
} catch (Exception e) {
throw new ElasticsearchHttpException("Caught an exception during execution of URL (" + url + ")Exception Message: (" + e + ")");
} finally {
if (isStream != null)
isStream.close();
}
return return_;
}
/**
* delete all the files/dirs in the given Directory but dont delete the dir
* itself.
*/
public static void cleanupDir(String dirPath, List<String> childdirs) throws IOException {
if (childdirs == null || childdirs.size() == 0)
FileUtils.cleanDirectory(new File(dirPath));
else {
for (String cdir : childdirs)
FileUtils.cleanDirectory(new File(dirPath + "/" + cdir));
}
}
public static void createDirs(String location) {
File dirFile = new File(location);
if (dirFile.exists() && dirFile.isFile()) {
dirFile.delete();
dirFile.mkdirs();
} else if (!dirFile.exists())
dirFile.mkdirs();
}
public static byte[] md5(byte[] buf) {
try {
MessageDigest mdigest = MessageDigest.getInstance("MD5");
mdigest.update(buf, 0, buf.length);
return mdigest.digest();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Get a Md5 string which is similar to OS Md5sum
*/
public static String md5(File file) {
try {
HashCode hc = Files.hash(file, Hashing.md5());
return toHex(hc.asBytes());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static String toHex(byte[] digest) {
StringBuffer sb = new StringBuffer(digest.length * 2);
for (int i = 0; i < digest.length; i++) {
String hex = Integer.toHexString(digest[i]);
if (hex.length() == 1) {
sb.append("0");
} else if (hex.length() == 8) {
hex = hex.substring(6);
}
sb.append(hex);
}
return sb.toString().toLowerCase();
}
public static String toBase64(byte[] md5) {
byte encoded[] = Base64.encodeBase64(md5, false);
return new String(encoded);
}
public static String formatDate(DateTime dateTime, String dateFormat) {
DateTimeFormatter fmt = DateTimeFormat.forPattern(dateFormat);
return dateTime.toString(fmt);
}
public static String[] getSecurityGroupIds(String MAC_ID) {
String securityGroupIds = SystemUtils.getDataFromUrl(
"http://169.254.169.254/latest/meta-data/network/interfaces/macs/" + MAC_ID +
"/security-group-ids/").trim();
if (securityGroupIds.isEmpty()) {
throw new RuntimeException("Security group ID's are null or empty");
}
return securityGroupIds.split("\n");
}
} | 5,530 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/NFException.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
/**
 * Immutable value object describing a single recorded failure: the key it
 * relates to (presumably a ColumnFamily key — confirm against callers), the
 * path being processed, and the rendered stack trace text.
 */
public class NFException {
    private final String cfKey;
    private final String pathName;
    private final String stacktrace;

    /**
     * @param cfKey      key associated with the failure
     * @param pathName   path that was being processed when the failure occurred
     * @param stacktrace rendered stack trace of the underlying error
     */
    public NFException(String cfKey, String pathName, String stacktrace) {
        this.cfKey = cfKey;
        this.pathName = pathName;
        this.stacktrace = stacktrace;
    }

    public String getCfKey() { return cfKey; }

    public String getPathName() { return pathName; }

    public String getStacktrace() { return stacktrace; }
}
| 5,531 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchTransportClient.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class ElasticsearchTransportClient {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchTransportClient.class);

    // Lazily-initialized process-wide client; see the note on instance() below
    private static AtomicReference<ElasticsearchTransportClient> elasticsearchTransportClientAtomicReference = new AtomicReference<>(null);

    private final TransportClient client;
    // Pre-built node-stats request for this node, reused by getNodesStatsResponse()
    private final NodesStatsRequestBuilder nodeStatsRequestBuilder;

    /**
     * Hostname and port to talk to will be same server for now optionally we might want the IP to poll.
     * NOTE: This class shouldn't be a singleton and this shouldn't be cached.
     * This will work only if Elasticsearch runs.
     */
    private ElasticsearchTransportClient(InetAddress host, IConfiguration configuration) {
        logger.info("Initializing client connection to {}", host.toString());
        Map<String, String> transportClientSettings = new HashMap<>();
        // The transport client must use the same cluster name as the local node
        transportClientSettings.put("cluster.name", configuration.getAppName());
        client = new PreBuiltTransportClient(Settings.builder().put(transportClientSettings).build());
        client.addTransportAddress(new InetSocketTransportAddress(host, configuration.getTransportTcpPort()));
        nodeStatsRequestBuilder = client.admin().cluster().prepareNodesStats(configuration.getEsNodeName()).all();
    }

    /**
     * Try to create if it is null.
     * <p>
     * NOTE(review): the null-check and set below are not atomic (no compareAndSet),
     * so two threads racing here could each build a client and one would be
     * abandoned unclosed — confirm whether concurrent first use is possible.
     *
     * @throws ElasticsearchTransportClientConnectionException
     */
    public static ElasticsearchTransportClient instance(IConfiguration configuration) throws ElasticsearchTransportClientConnectionException {
        if (elasticsearchTransportClientAtomicReference.get() == null) {
            elasticsearchTransportClientAtomicReference.set(connect(configuration));
        }
        return elasticsearchTransportClientAtomicReference.get();
    }

    /**
     * Fetches node statistics through the cached transport client.
     *
     * @return the stats response, or null if the client could not be created or the call failed
     */
    public static NodesStatsResponse getNodesStatsResponse(IConfiguration config) {
        try {
            return ElasticsearchTransportClient.instance(config).getNodeStatsRequestBuilder().execute().actionGet();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
        return null;
    }

    /**
     * Builds a new transport client, retrying with bounded exponential backoff
     * (Elasticsearch may still be starting and binding its ports).
     *
     * @throws ElasticsearchTransportClientConnectionException if Elasticsearch is not
     *         running or the connection cannot be established after all retries
     */
    private static synchronized ElasticsearchTransportClient connect(final IConfiguration configuration) throws ElasticsearchTransportClientConnectionException {
        ElasticsearchTransportClient transportClient;
        // If Elasticsearch is started then only start the monitoring
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            logger.error("Elasticsearch is not yet started");
            throw new ElasticsearchTransportClientConnectionException("Elasticsearch is not yet started");
        }
        try {
            transportClient = new BoundedExponentialRetryCallable<ElasticsearchTransportClient>() {
                @Override
                public ElasticsearchTransportClient retriableCall() throws Exception {
                    // Always connects over loopback; only the port comes from configuration
                    return new ElasticsearchTransportClient(InetAddress.getLoopbackAddress(), configuration);
                }
            }.call();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new ElasticsearchTransportClientConnectionException(e.getMessage());
        }
        return transportClient;
    }

    private NodesStatsRequestBuilder getNodeStatsRequestBuilder() {
        return nodeStatsRequestBuilder;
    }

    public Client getTransportClient() {
        return client;
    }
}
| 5,532 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/FifoQueue.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import java.util.Comparator;
import java.util.TreeSet;
public class FifoQueue<E extends Comparable<E>> extends TreeSet<E>
{
private static final long serialVersionUID = -7388604551920505669L;
private int capacity;
public FifoQueue(int capacity)
{
super(new Comparator<E>()
{
@Override
public int compare(E o1, E o2)
{
return o1.compareTo(o2);
}
});
this.capacity = capacity;
}
public FifoQueue(int capacity, Comparator<E> comparator)
{
super(comparator);
this.capacity = capacity;
}
public synchronized void adjustAndAdd(E e)
{
add(e);
if (capacity < size())
pollFirst();
}
}
| 5,533 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/Sleeper.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.google.inject.ImplementedBy;
/**
 * An abstraction over {@link Thread#sleep(long)} so sleeping code can be mocked
 * in tests instead of actually blocking.
 */
@ImplementedBy(ThreadSleeper.class)
public interface Sleeper
{
    /** Sleeps for the given number of milliseconds; propagates interruption. */
    void sleep(long waitTimeMs) throws InterruptedException;

    /** Sleeps for the given number of milliseconds without throwing InterruptedException. */
    void sleepQuietly(long waitTimeMs);
}
| 5,534 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchUtils.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.RaigadInstance;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
public class ElasticsearchUtils {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchUtils.class);

    // JSON field names shared by the serialization/deserialization helpers below
    private static final String HOST_NAME = "host_name";
    private static final String ID = "id";
    private static final String APP_NAME = "app_name";
    private static final String INSTANCE_ID = "instance_id";
    private static final String AVAILABILITY_ZONE = "availability_zone";
    private static final String PUBLIC_IP = "public_ip";
    private static final String DC = "dc";
    private static final String UPDATE_TIME = "update_time";

    // Repository names are the current UTC date, e.g. 20140630
    private static final String S3_REPO_DATE_FORMAT = "yyyyMMdd";
    private static final DateTimeZone currentZone = DateTimeZone.UTC;

    /**
     * Serializes the given instances as {"instances": {"instance-0": {...}, ...}}.
     * Inverse of {@link #getRaigadInstancesFromJson(JSONObject)}.
     */
    @SuppressWarnings("unchecked")
    public static JSONObject transformRaigadInstanceToJson(List<RaigadInstance> instances) {
        JSONObject esJsonInstances = new JSONObject();

        for (int i = 0; i < instances.size(); i++) {
            RaigadInstance instance = instances.get(i);

            JSONObject jsInstance = new JSONObject();
            jsInstance.put(HOST_NAME, instance.getHostName());
            jsInstance.put(ID, instance.getId());
            jsInstance.put(APP_NAME, instance.getApp());
            jsInstance.put(INSTANCE_ID, instance.getInstanceId());
            jsInstance.put(AVAILABILITY_ZONE, instance.getAvailabilityZone());
            jsInstance.put(PUBLIC_IP, instance.getHostIP());
            jsInstance.put(DC, instance.getDC());
            jsInstance.put(UPDATE_TIME, instance.getUpdatetime());

            // The original also built a JSONArray per iteration that was never
            // used anywhere; that dead code has been removed.
            esJsonInstances.put("instance-" + i, jsInstance);
        }

        JSONObject allInstances = new JSONObject();
        allInstances.put("instances", esJsonInstances);
        return allInstances;
    }

    /**
     * Inverse of {@link #transformRaigadInstanceToJson(List)}: reads consecutive
     * "instance-N" entries starting at N=0 and stops at the first missing index.
     */
    public static List<RaigadInstance> getRaigadInstancesFromJson(JSONObject instances) {
        List<RaigadInstance> raigadInstances = new ArrayList<>();
        JSONObject topLevelInstance = (JSONObject) instances.get("instances");

        for (int i = 0; ; i++) {
            JSONObject eachInstance = (JSONObject) topLevelInstance.get("instance-" + i);
            if (eachInstance == null) {
                break;
            }

            // Build RaigadInstance
            RaigadInstance raigadInstance = new RaigadInstance();
            raigadInstance.setApp((String) eachInstance.get(APP_NAME));
            raigadInstance.setAvailabilityZone((String) eachInstance.get(AVAILABILITY_ZONE));
            raigadInstance.setDC((String) eachInstance.get(DC));
            raigadInstance.setHostIP((String) eachInstance.get(PUBLIC_IP));
            raigadInstance.setHostName((String) eachInstance.get(HOST_NAME));
            raigadInstance.setId((String) eachInstance.get(ID));
            raigadInstance.setInstanceId((String) eachInstance.get(INSTANCE_ID));
            raigadInstance.setUpdatetime((Long) eachInstance.get(UPDATE_TIME));

            raigadInstances.add(raigadInstance);
        }

        return raigadInstances;
    }

    /**
     * Determines whether this node is the cluster master by querying the
     * master-node URL and comparing the returned address against our own
     * public and local IPs (case-insensitively).
     *
     * @return true when the reported master address matches either of this host's IPs
     * @throws Exception if the master-node HTTP call fails
     */
    public static boolean amIMasterNode(IConfiguration config, HttpModule httpModule) throws Exception {
        String URL = httpModule.findMasterNodeURL();
        String response = SystemUtils.runHttpGetCommand(URL);

        if (config.isDebugEnabled()) {
            logger.debug("Calling {} returned: {}", URL, response);
        }

        // Check the response
        response = StringUtils.trim(response);
        if (StringUtils.isEmpty(response)) {
            logger.error("Response from {} is empty", URL);
            return false;
        }

        return response.equalsIgnoreCase(config.getHostIP()) || response.equalsIgnoreCase(config.getHostLocalIP());
    }

    /**
     * Lists the names of all snapshots currently stored in the given repository.
     */
    public static List<String> getAvailableSnapshots(Client transportClient, String repositoryName) {
        logger.info("Searching for available snapshots");

        List<String> snapshots = new ArrayList<>();
        GetSnapshotsResponse getSnapshotsResponse = transportClient.admin().cluster()
                .prepareGetSnapshots(repositoryName)
                .get();

        for (SnapshotInfo snapshotInfo : getSnapshotsResponse.getSnapshots()) {
            snapshots.add(snapshotInfo.snapshotId().getName());
        }

        return snapshots;
    }

    /**
     * Repository name is today's UTC date in yyyyMMdd format, e.g. 20140630.
     *
     * @return Repository Name
     */
    public static String getS3RepositoryName() {
        DateTime dateTime = new DateTime();
        DateTime dateTimeGmt = dateTime.withZone(currentZone);
        return formatDate(dateTimeGmt, S3_REPO_DATE_FORMAT);
    }

    /**
     * Formats the given Joda-Time instant with the supplied pattern.
     */
    public static String formatDate(DateTime dateTime, String dateFormat) {
        DateTimeFormatter fmt = DateTimeFormat.forPattern(dateFormat);
        return dateTime.toString(fmt);
    }
}
| 5,535 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchHttpException.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import java.io.IOException;
/**
 * {@link IOException} subtype raised when an HTTP interaction with
 * Elasticsearch fails: a null/invalid response, a non-200 status, or a
 * transport-level error.
 */
public class ElasticsearchHttpException extends IOException {
    private static final long serialVersionUID = 444L;

    /** @param message description of the failed HTTP interaction */
    public ElasticsearchHttpException(String message) {
        super(message);
    }

    /**
     * @param message description of the failed HTTP interaction
     * @param e       the underlying cause
     */
    public ElasticsearchHttpException(String message, Exception e) {
        super(message, e);
    }
}
| 5,536 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/IElasticsearchTuner.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.google.inject.ImplementedBy;
import com.netflix.raigad.defaultimpl.StandardTuner;
import java.io.IOException;
@ImplementedBy(StandardTuner.class)
public interface IElasticsearchTuner {
    /**
     * Writes the full set of configuration properties to the YAML file at the
     * given location (presumably elasticsearch.yml — confirm against StandardTuner).
     *
     * @param yamlLocation path of the YAML configuration file to (re)write
     * @param hostname     hostname of the local node
     * @throws IOException if the file cannot be read or written
     */
    void writeAllProperties(String yamlLocation, String hostname) throws IOException;
}
| 5,537 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/TribeUtils.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.util.Map;
@Singleton
public class TribeUtils {
    private static final Logger logger = LoggerFactory.getLogger(TribeUtils.class);

    private final IConfiguration config;

    @Inject
    public TribeUtils(IConfiguration config) {
        this.config = config;
    }

    /**
     * Resolves the source cluster name for a tribe by reading the
     * "tribe.&lt;tribeId&gt;.cluster.name" key from the Elasticsearch YAML file
     * configured in {@link IConfiguration#getYamlLocation()}.
     *
     * @param tribeId tribe identifier as used in the YAML keys
     * @return the configured cluster name, or null when the key is absent
     * @throws FileNotFoundException if the YAML file does not exist
     */
    public String getTribeClusterNameFromId(String tribeId) throws FileNotFoundException {
        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
        Yaml yaml = new Yaml(options);
        File yamlFile = new File(config.getYamlLocation());

        // The original implementation leaked this FileInputStream; make sure it
        // is always closed (close failures are logged, not propagated, so the
        // declared throws clause stays unchanged for existing callers).
        FileInputStream yamlStream = new FileInputStream(yamlFile);
        try {
            Map map = (Map) yaml.load(yamlStream);
            String sourceClusterName = (String) map.get("tribe." + tribeId + ".cluster.name");
            logger.info("Source cluster associated with tribe ID {} is {}", tribeId, sourceClusterName);
            return sourceClusterName;
        } finally {
            try {
                yamlStream.close();
            } catch (Exception e) {
                logger.warn("Unable to close YAML input stream for {}", yamlFile, e);
            }
        }
    }
}
| 5,538 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ThreadSleeper.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
/**
* Sleeper impl that delegates to Thread.sleep
*/
/**
 * {@link Sleeper} implementation that simply delegates to {@link Thread#sleep(long)}.
 */
public class ThreadSleeper implements Sleeper {
    @Override
    public void sleep(long waitTimeMs) throws InterruptedException {
        Thread.sleep(waitTimeMs);
    }

    @Override
    public void sleepQuietly(long waitTimeMs) {
        try {
            sleep(waitTimeMs);
        } catch (InterruptedException ignored) {
            // Deliberately swallowed: the "quiet" variant never propagates interruption
        }
    }
}
| 5,539 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/RetriableCallable.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
public abstract class RetriableCallable<T> implements Callable<T> {
public static final int DEFAULT_NUMBER_OF_RETRIES = 15;
public static final long DEFAULT_WAIT_TIME = 100;
private static final Logger logger = LoggerFactory.getLogger(RetriableCallable.class);
private int retries;
private long waitTime;
public RetriableCallable() {
this(DEFAULT_NUMBER_OF_RETRIES, DEFAULT_WAIT_TIME);
}
public RetriableCallable(int retries, long waitTime) {
set(retries, waitTime);
}
public void set(int retries, long waitTime) {
this.retries = retries;
this.waitTime = waitTime;
}
public abstract T retriableCall() throws Exception;
public T call() throws Exception {
int retry = 0;
int logCounter = 0;
while (true) {
try {
return retriableCall();
} catch (CancellationException e) {
throw e;
} catch (Exception e) {
retry++;
if (retry == retries) {
throw e;
}
logger.error(String.format("Retry #%d for: %s", retry, e.getMessage()));
if (++logCounter == 1) {
logger.error("Exception: " + ExceptionUtils.getFullStackTrace(e));
}
Thread.sleep(waitTime);
} finally {
forEachExecution();
}
}
}
public void forEachExecution() {
// Do nothing by default
}
} | 5,540 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ExponentialRetryCallable.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import java.util.concurrent.CancellationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class ExponentialRetryCallable<T> extends RetriableCallable<T>
{
public final static long MAX_SLEEP = 240000;
public final static long MIN_SLEEP = 200;
private static final Logger logger = LoggerFactory.getLogger(RetriableCallable.class);
private long max;
private long min;
public ExponentialRetryCallable()
{
this.max = MAX_SLEEP;
this.min = MIN_SLEEP;
}
public ExponentialRetryCallable(long minSleep, long maxSleep)
{
this.max = maxSleep;
this.min = minSleep;
}
public T call() throws Exception
{
long delay = min;// ms
while (true)
{
try
{
return retriableCall();
}
catch (CancellationException e)
{
throw e;
}
catch (Exception e)
{
delay *= 2;
if (delay > max)
{
throw e;
}
logger.error(e.getMessage());
Thread.sleep(delay);
}
finally
{
forEachExecution();
}
}
}
}
| 5,541 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/BoundedExponentialRetryCallable.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.CancellationException;
/**
 * Retry helper with exponentially growing sleeps that are capped at a maximum
 * delay AND limited to a maximum number of retries (unlike
 * ExponentialRetryCallable, which only bounds the delay).
 *
 * @param <T> result type of the retried operation
 */
public abstract class BoundedExponentialRetryCallable<T> extends RetriableCallable<T> {
    public final static long MAX_SLEEP = 10000;
    public final static long MIN_SLEEP = 1000;
    public final static int MAX_RETRIES = 10;

    private static final Logger logger = LoggerFactory.getLogger(BoundedExponentialRetryCallable.class);

    // Backoff bounds (ms) and retry budget used by call()
    private long max;
    private long min;
    private int maxRetries;
    private final ThreadSleeper sleeper = new ThreadSleeper();

    /** Uses the defaults: 1 s initial delay, 10 s cap, at most 10 retries. */
    public BoundedExponentialRetryCallable() {
        this.max = MAX_SLEEP;
        this.min = MIN_SLEEP;
        this.maxRetries = MAX_RETRIES;
    }

    public BoundedExponentialRetryCallable(long minSleep, long maxSleep, int maxNumRetries) {
        this.max = maxSleep;
        this.min = minSleep;
        this.maxRetries = maxNumRetries;
    }

    /**
     * Invokes {@link #retriableCall()} until it succeeds or maxRetries failures
     * occur (then the last exception is rethrown). While the delay is below max
     * it doubles after each failure — the doubling happens before sleeping, so
     * the first sleep is 2 * min. Once the delay reaches max, each further
     * attempt sleeps exactly max. CancellationException aborts immediately.
     * The full stack trace is logged only once, for the first failure;
     * {@link RetriableCallable#forEachExecution()} runs after every attempt.
     */
    public T call() throws Exception {
        long delay = min;// ms
        int retry = 0;
        int logCounter = 0;
        while (true) {
            try {
                return retriableCall();
            } catch (CancellationException e) {
                throw e;
            } catch (Exception e) {
                retry++;
                if (delay < max && retry <= maxRetries) {
                    // Growth phase: double the delay, then sleep
                    delay *= 2;
                    logger.error(String.format("Retry #%d for: %s", retry, e.getMessage()));
                    if (++logCounter == 1) {
                        logger.info("Exception --> " + ExceptionUtils.getFullStackTrace(e));
                    }
                    sleeper.sleep(delay);
                } else if (delay >= max && retry <= maxRetries) {
                    // Saturated phase: keep retrying at a constant max delay
                    logger.error(String.format("Retry #%d for: %s", retry, ExceptionUtils.getFullStackTrace(e)));
                    sleeper.sleep(max);
                } else {
                    // Retry budget exhausted; surface the most recent failure
                    throw e;
                }
            } finally {
                forEachExecution();
            }
        }
    }
}
| 5,542 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchProcessMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.concurrent.atomic.AtomicBoolean;
/*
* This task checks if the Elasticsearch process is running.
*/
@Singleton
public class ElasticsearchProcessMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchProcessMonitor.class);

    public static final String JOB_NAME = "ES_MONITOR_THREAD";

    // True while a matching Elasticsearch process is currently observed
    static final AtomicBoolean isElasticsearchRunningNow = new AtomicBoolean(false);
    // Latches to true the first time the process is ever observed running
    static final AtomicBoolean wasElasticsearchStarted = new AtomicBoolean(false);

    @Inject
    protected ElasticsearchProcessMonitor(IConfiguration config) {
        super(config);
    }

    @Override
    public void execute() throws Exception {
        checkElasticsearchProcess(config.getElasticsearchProcessName());
    }

    @Override
    public String getName() {
        return JOB_NAME;
    }

    // Extracted so tests can stub it and avoid spawning real processes
    Runtime getRuntime() {
        return Runtime.getRuntime();
    }

    /** Reads and trims the first line of the stream; null when the stream is empty. */
    String getFirstLine(InputStream inputStream) throws IOException {
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
        return StringUtils.trim(bufferedReader.readLine());
    }

    /**
     * Runs {@code pgrep -f <name>} and updates the running/started flags based on
     * whether any matching PID was printed. Any failure is logged and treated as
     * "not running".
     */
    void checkElasticsearchProcess(String elasticsearchProcessName) throws Exception {
        Process pgrepProcess = null;

        try {
            // pgrep prints the PID of any process whose command line matches
            pgrepProcess = getRuntime().exec("pgrep -f " + elasticsearchProcessName);

            // try-with-resources guarantees the stream is closed even if reading fails
            try (InputStream processInputStream = pgrepProcess.getInputStream()) {
                String line = getFirstLine(processInputStream);

                if (StringUtils.isNotEmpty(line) && !isElasticsearchRunning()) {
                    isElasticsearchRunningNow.set(true);
                    // Atomically latch the "was ever started" flag (replaces racy get-then-set)
                    wasElasticsearchStarted.compareAndSet(false, true);
                } else if (StringUtils.isEmpty(line) && isElasticsearchRunning()) {
                    isElasticsearchRunningNow.set(false);
                }
            }
        } catch (Exception e) {
            logger.warn("Exception checking if process is running", e);
            isElasticsearchRunningNow.set(false);
        } finally {
            if (pgrepProcess != null) {
                pgrepProcess.destroyForcibly();
            }
        }
    }

    /** Schedules this task to run every 10 seconds. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOB_NAME, 10L * 1000);
    }

    public static Boolean isElasticsearchRunning() {
        return isElasticsearchRunningNow.get();
    }

    public static Boolean getWasElasticsearchStarted() {
        return wasElasticsearchStarted.get();
    }
}
| 5,543 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/HttpModule.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.utils;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
@Singleton
public class HttpModule {
    private static final String HTTP_TAG = "http://";
    private static final String LOCAL_HOST = "127.0.0.1";
    private static final String URL_PORT_SEPARATOR = ":";
    private static final String URL_PATH_SEPARATOR = "/";
    private static final String MASTER_NODE_SUFFIX = "/_cat/master?h=ip";
    private static final String SNAPSHOT_BKP_KEYWORD = "/_snapshot/";
    private static final String SNAPSHOT_BKP_WAIT_FOR_COMPLETION_TAG = "?wait_for_completion=";

    private final IConfiguration config;

    @Inject
    public HttpModule(IConfiguration config) {
        this.config = config;
    }

    /** Builds the local-node URL that reports the IP of the current master node. */
    public String findMasterNodeURL() {
        return localBaseUrl() + MASTER_NODE_SUFFIX;
    }

    /**
     * Builds the local-node URL that triggers a snapshot backup in the given
     * repository, honoring the configured wait-for-completion setting.
     */
    public String runSnapshotBackupURL(String repositoryName, String snapshotName) {
        return localBaseUrl()
                + SNAPSHOT_BKP_KEYWORD + repositoryName
                + URL_PATH_SEPARATOR + snapshotName
                + SNAPSHOT_BKP_WAIT_FOR_COMPLETION_TAG + config.waitForCompletionOfBackup();
    }

    // Common "http://127.0.0.1:<port>" prefix shared by all endpoints
    private String localBaseUrl() {
        return HTTP_TAG + LOCAL_HOST + URL_PORT_SEPARATOR + config.getHttpPort();
    }
}
| 5,544 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/ElasticsearchProcessManager.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.defaultimpl;
import com.google.inject.Inject;
import com.netflix.raigad.configuration.IConfiguration;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;
public class ElasticsearchProcessManager implements IElasticsearchProcess {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchProcessManager.class);
    private static final int SCRIPT_EXECUTE_WAIT_TIME_MS = 5000;

    private final IConfiguration config;

    @Inject
    public ElasticsearchProcessManager(IConfiguration config) {
        this.config = config;
    }

    /** Startup command split on spaces; empty array when no script is configured. */
    String[] getStartupCommand() {
        return StringUtils.split(StringUtils.trimToEmpty(config.getElasticsearchStartupScript()), ' ');
    }

    /** Stop command split on spaces; empty array when no script is configured. */
    String[] getStopCommand() {
        return StringUtils.split(StringUtils.trimToEmpty(config.getElasticsearchStopScript()), ' ');
    }

    /**
     * Executes the given command, waiting up to {@code SCRIPT_EXECUTE_WAIT_TIME_MS}
     * for it to finish, and logs the outcome. Never throws; failures are logged.
     */
    void runCommand(String[] command) {
        Process process = null;

        try {
            ProcessBuilder processBuilder = new ProcessBuilder(command).redirectErrorStream(true);
            process = processBuilder.start();

            // waitFor(timeout) returns false on timeout; calling exitValue() on a
            // still-running process throws IllegalThreadStateException, so the
            // timeout case must be handled explicitly (the original ignored it)
            boolean finished = process.waitFor(SCRIPT_EXECUTE_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
            if (!finished) {
                logger.error(String.format("Timed out after %d ms waiting for %s",
                        SCRIPT_EXECUTE_WAIT_TIME_MS, StringUtils.join(command, ' ')));
                return;
            }

            int exitCode = process.exitValue();
            if (exitCode == 0) {
                logger.info(String.format("Successfully executed %s", StringUtils.join(command, ' ')));
            } else {
                logger.error(String.format("Error executing %s, exited with code %d", StringUtils.join(command, ' '), exitCode));
            }
        } catch (Exception e) {
            logger.error(String.format("Exception executing %s", StringUtils.join(command, ' ')), e);
        } finally {
            if (process != null) {
                process.destroyForcibly();
            }
        }
    }

    /** Runs the configured startup script, if any. */
    public void start() {
        logger.info("Starting Elasticsearch server");

        String[] startupCommand = getStartupCommand();
        if (startupCommand == null || startupCommand.length == 0) {
            logger.warn("Elasticsearch startup command was not specified");
            return;
        }

        runCommand(startupCommand);
    }

    /** Runs the configured stop script, if any. */
    public void stop() {
        logger.info("Stopping Elasticsearch server");

        String[] stopCommand = getStopCommand();
        if (stopCommand == null || stopCommand.length == 0) {
            logger.warn("Elasticsearch stop command was not specified");
            return;
        }

        runCommand(stopCommand);
    }

    /** Drains the process's combined stdout/stderr and logs it. */
    void logProcessOutput(Process process) {
        // try-with-resources replaces the original's manual close-in-finally
        try (InputStream inputStream = process.getInputStream()) {
            final String processOutputStream = readProcessStream(inputStream);
            logger.info("Standard/Error out: {}", processOutputStream);
        } catch (IOException e) {
            logger.warn("Failed to read the standard/error output stream", e);
        }
    }

    /** Reads the stream to exhaustion and returns its contents as a String. */
    private String readProcessStream(InputStream inputStream) throws IOException {
        final byte[] buffer = new byte[512];
        final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(buffer.length);

        int count;
        while ((count = inputStream.read(buffer)) != -1) {
            byteArrayOutputStream.write(buffer, 0, count);
        }

        return byteArrayOutputStream.toString();
    }
}
| 5,545 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/IElasticsearchProcess.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.defaultimpl;
import com.google.inject.ImplementedBy;
import java.io.IOException;
/**
* Interface to aid in starting and stopping Elasticsearch.
*/
@ImplementedBy(ElasticsearchProcessManager.class)
public interface IElasticsearchProcess {
    /**
     * Starts the Elasticsearch server process.
     *
     * @throws IOException if the process cannot be started
     */
    void start() throws IOException;

    /**
     * Stops the Elasticsearch server process.
     *
     * @throws IOException if the process cannot be stopped
     */
    void stop() throws IOException;
}
| 5,546 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/StandardTuner.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.defaultimpl;
import com.google.inject.Inject;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.utils.IElasticsearchTuner;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
public class StandardTuner implements IElasticsearchTuner {
    private static final Logger logger = LoggerFactory.getLogger(StandardTuner.class);

    private static final String COMMA_SEPARATOR = ",";
    private static final String PARAM_SEPARATOR = "=";

    protected final IConfiguration config;

    @Inject
    public StandardTuner(IConfiguration config) {
        this.config = config;
    }

    /**
     * Rewrites elasticsearch.yml in place: loads the existing file, overlays
     * cluster/node/network settings derived from the Raigad configuration
     * (tribe-node or regular-node flavor), appends any extra key/value pairs,
     * and dumps the result back to the same file.
     *
     * @param yamlLocation path to the elasticsearch.yml to rewrite
     * @param hostname     unused here; part of the IElasticsearchTuner contract
     * @throws IOException if the YAML file cannot be read or written
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    public void writeAllProperties(String yamlLocation, String hostname) throws IOException {
        logger.info("Using configuration of type [{}]", config.getClass());

        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);

        Yaml yaml = new Yaml(options);
        File yamlFile = new File(yamlLocation);

        Map map;
        // try-with-resources: the original leaked this FileInputStream
        try (FileInputStream yamlInputStream = new FileInputStream(yamlFile)) {
            map = (Map) yaml.load(yamlInputStream);
        }

        map.put("cluster.name", config.getAppName());
        map.put("node.name", config.getEsNodeName());
        map.put("http.port", config.getHttpPort());
        map.put("path.data", config.getDataFileLocation());
        map.put("path.logs", config.getLogFileLocation());

        if (config.isVPCExternal()) {
            map.put("network.publish_host", config.getHostIP());
            map.put("http.publish_host", config.getHostname());
        }
        else {
            map.put("network.publish_host", "_global_");
        }

        if (config.isKibanaSetupRequired()) {
            // Kibana needs cross-origin access to the HTTP API
            map.put("http.cors.enabled", true);
            map.put("http.cors.allow-origin", "*");
        }

        if (config.amITribeNode()) {
            // Source clusters are configured as "name=port,name=port,..."
            String clusterParams = config.getCommaSeparatedSourceClustersForTribeNode();
            assert (clusterParams != null) : "Source clusters for tribe nodes cannot be null";

            String[] clusters = StringUtils.split(clusterParams, COMMA_SEPARATOR);
            assert (clusters.length != 0) : "At least one source cluster is needed";

            List<Integer> tribePorts = new ArrayList<>();
            tribePorts.add(config.getTransportTcpPort());

            // Common settings
            for (int i = 0; i < clusters.length; i++) {
                String[] clusterNameAndPort = clusters[i].split(PARAM_SEPARATOR);
                // Fixed: the original asserted length != 2, which failed on VALID input
                assert (clusterNameAndPort.length == 2) : "Cluster name or transport port is missing in configuration";
                assert (StringUtils.isNumeric(clusterNameAndPort[1])) : "Source tribe cluster port is invalid";

                map.put("tribe.t" + i + ".cluster.name", clusterNameAndPort[0]);
                map.put("tribe.t" + i + ".transport.tcp.port", Integer.parseInt(clusterNameAndPort[1]));
                map.put("tribe.t" + i + ".discovery.zen.hosts_provider", config.getElasticsearchDiscoveryType());
                map.put("tribe.t" + i + ".network.host", "_global_");

                logger.info("Adding cluster [{}:{}]", clusterNameAndPort[0], clusterNameAndPort[1]);
                tribePorts.add(Integer.valueOf(clusterNameAndPort[1]));
            }

            Collections.sort(tribePorts);
            String transportPortRange = String.format("%d-%d", tribePorts.get(0), tribePorts.get(tribePorts.size() - 1));
            logger.info("Setting tribe transport port range to {}", transportPortRange);

            // Adding port range to include tribe cluster port as well as transport for each source cluster
            map.put("transport.tcp.port", transportPortRange);

            // Tribe nodes never hold data or become master
            map.put("node.master", false);
            map.put("node.data", false);

            if (config.amIWriteEnabledTribeNode()) {
                map.put("tribe.blocks.write", false);
            }
            else {
                map.put("tribe.blocks.write", true);
            }

            if (config.amIMetadataEnabledTribeNode()) {
                map.put("tribe.blocks.metadata", false);
            }
            else {
                map.put("tribe.blocks.metadata", true);
            }

            map.put("tribe.on_conflict", "prefer_" + config.getTribePreferredClusterIdOnConflict());
        }
        else {
            map.put("transport.tcp.port", config.getTransportTcpPort());
            map.put("discovery.zen.hosts_provider", config.getElasticsearchDiscoveryType());
            map.put("discovery.zen.minimum_master_nodes", config.getMinimumMasterNodes());

            // NOTE: When using awareness attributes, shards will not be allocated to nodes that
            // do not have values set for those attributes. Important in dedicated master nodes deployment
            map.put("cluster.routing.allocation.awareness.attributes", config.getClusterRoutingAttributes());

            if (config.isShardPerNodeEnabled()) {
                map.put("cluster.routing.allocation.total_shards_per_node", config.getTotalShardsPerNode());
            }

            if (config.isMultiDC()) {
                map.put("node.attr.rack_id", config.getDC());
            }
            else {
                map.put("node.attr.rack_id", config.getRac());
            }

            if (config.isAsgBasedDedicatedDeployment()) {
                // The ASG stack name decides the node's single dedicated role
                if ("master".equalsIgnoreCase(config.getStackName())) {
                    map.put("node.master", true);
                    map.put("node.data", false);
                    map.put("node.ingest", false);
                }
                else if ("data".equalsIgnoreCase(config.getStackName())) {
                    map.put("node.master", false);
                    map.put("node.data", true);
                    map.put("node.ingest", false);
                }
                else if ("search".equalsIgnoreCase(config.getStackName())) {
                    map.put("node.master", false);
                    map.put("node.data", false);
                    map.put("node.ingest", true);
                }
                else {
                    map.put("node.master", false);
                    map.put("node.data", false);
                    map.put("node.ingest", false);
                }
            }
        }

        addExtraEsParams(map);
        logger.info(yaml.dump(map));

        // try-with-resources: the original leaked this FileWriter
        try (FileWriter yamlFileWriter = new FileWriter(yamlFile)) {
            yaml.dump(map, yamlFileWriter);
        }
    }

    /**
     * Overlays extra settings onto the YAML map. The configuration value is a
     * comma-separated list of {@code <raigadKey>=<esKey>} pairs; the value
     * written for each Elasticsearch key is looked up via
     * {@code config.getEsKeyName(raigadKey)}. Malformed or unresolvable entries
     * are logged and skipped.
     */
    public void addExtraEsParams(Map map) {
        String extraConfigParams = config.getExtraConfigParams();
        if (extraConfigParams == null) {
            logger.info("Updating elasticsearch.yml: no extra parameters");
            return;
        }

        String[] pairs = extraConfigParams.trim().split(COMMA_SEPARATOR);
        logger.info("Updating elasticsearch.yml: adding extra parameters");

        for (String pair : pairs) {
            String[] keyValue = pair.trim().split(PARAM_SEPARATOR);
            // Guard against malformed entries: the original indexed keyValue[1]
            // unconditionally and threw ArrayIndexOutOfBoundsException
            if (keyValue.length < 2) {
                logger.error("Malformed extra parameter [{}], expected <raigadKey>=<esKey>, skipping...", pair);
                continue;
            }

            String raigadKey = keyValue[0].trim();
            String esKey = keyValue[1].trim();
            String esValue = config.getEsKeyName(raigadKey);

            logger.info("Updating YAML: Raigad key [{}], Elasticsearch key [{}], value [{}]", raigadKey, esKey, esValue);

            // raigadKey/esKey are non-null after trim(); only the looked-up value can be missing
            if (esValue == null) {
                logger.error("One of the extra keys or values is null, skipping...");
                continue;
            }

            map.put(esKey, esValue);
        }
    }
}
| 5,547 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/ElasticsearchInstance.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.defaultimpl;
import java.io.Serializable;
/**
 * Serializable value object describing a single Elasticsearch node:
 * host/IP, owning cluster, placement (zone/region) and bookkeeping metadata.
 * Most setters are fluent and return {@code this} for chaining.
 */
public class ElasticsearchInstance implements Serializable {
    private static final long serialVersionUID = 5606412386974488659L;

    // Field names are part of the Java-serialized form - do not rename.
    private String hostname;
    private long updatetime;
    private int Id;
    private String cluster;
    private String instanceId;
    private String availabilityZone;
    private String publicip;
    private String region;

    public int getId() {
        return this.Id;
    }

    public void setId(int id) {
        this.Id = id;
    }

    public String getCluster() {
        return this.cluster;
    }

    public ElasticsearchInstance setCluster(String cluster) {
        this.cluster = cluster;
        return this;
    }

    public String getInstanceId() {
        return this.instanceId;
    }

    public ElasticsearchInstance setInstanceId(String instanceId) {
        this.instanceId = instanceId;
        return this;
    }

    public String getAvailabilityZone() {
        return this.availabilityZone;
    }

    public ElasticsearchInstance setAvailabilityZone(String availabilityZone) {
        this.availabilityZone = availabilityZone;
        return this;
    }

    public String getHostName() {
        return this.hostname;
    }

    public ElasticsearchInstance setHostName(String hostname) {
        this.hostname = hostname;
        return this;
    }

    public String getHostIP() {
        return this.publicip;
    }

    public ElasticsearchInstance setHostIP(String publicip) {
        this.publicip = publicip;
        return this;
    }

    public String getRegion() {
        return this.region;
    }

    public ElasticsearchInstance setRegion(String location) {
        this.region = location;
        return this;
    }

    public long getUpdatetime() {
        return this.updatetime;
    }

    public void setUpdatetime(long updatetime) {
        this.updatetime = updatetime;
    }

    @Override
    public String toString() {
        // Keep this exact template: callers/logs depend on the current wording
        String template = "Hostname: %s, InstanceId: %s, Cluster_: %s, Availability Zone : %s Region %s";
        return String.format(template, hostname, instanceId, cluster, availabilityZone, region);
    }
}
| 5,548 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/ClearCredential.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.aws;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;
/**
* This is a basic implementation of ICredentials. User should prefer to
* implement their own versions for more secured access. This class requires
* clear AWS key and access.
*
* Set the following properties in "conf/awscredntial.properties"
*
*/
public class ClearCredential implements ICredential
{
    private static final Logger logger = LoggerFactory.getLogger(ClearCredential.class);
    private static final String CRED_FILE = "/etc/awscredential.properties";

    private final Properties props;
    private final String AWS_ACCESS_ID;
    private final String AWS_KEY;

    /**
     * Loads the access key id (AWSACCESSID) and secret key (AWSKEY) from
     * {@code /etc/awscredential.properties}. Missing properties default to "".
     *
     * @throws RuntimeException if the file cannot be read - the service cannot start
     */
    public ClearCredential()
    {
        props = new Properties();

        // try-with-resources: the original's finally block called fis.close()
        // without a null check, so a missing credential file caused an NPE that
        // masked the intended RuntimeException
        try (FileInputStream fis = new FileInputStream(CRED_FILE))
        {
            props.load(fis);
            AWS_ACCESS_ID = props.getProperty("AWSACCESSID") != null ? props.getProperty("AWSACCESSID").trim() : "";
            AWS_KEY = props.getProperty("AWSKEY") != null ? props.getProperty("AWSKEY").trim() : "";
        }
        catch (Exception e)
        {
            logger.error("Exception with credential file ", e);
            throw new RuntimeException("Problem reading credential file. Cannot start.", e);
        }
    }

    public String getAccessKeyId()
    {
        return AWS_ACCESS_ID;
    }

    public String getSecretAccessKey()
    {
        return AWS_KEY;
    }

    /** Wraps the loaded key pair in the AWS SDK credential type. */
    public AWSCredentials getCredentials()
    {
        return new BasicAWSCredentials(getAccessKeyId(), getSecretAccessKey());
    }

    @Override
    public AWSCredentialsProvider getAwsCredentialProvider() {
        return new AWSCredentialsProvider(){
            public AWSCredentials getCredentials(){
                return ClearCredential.this.getCredentials();
            }

            @Override
            public void refresh() {
                // NOP - credentials are static, loaded once at construction
            }
        };
    }
}
| 5,549 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/AWSMembership.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.aws;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.*;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.IMembership;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* Class to query amazon ASG for its members to provide - Number of valid nodes
* in the ASG - Number of zones - Methods for adding ACLs for the nodes
*/
public class AWSMembership implements IMembership {
    private static final Logger logger = LoggerFactory.getLogger(AWSMembership.class);

    private final IConfiguration config;
    private final ICredential provider;

    @Inject
    public AWSMembership(IConfiguration config, ICredential provider) {
        this.config = config;
        this.provider = provider;
    }

    /**
     * Returns, for each requested ASG, the IDs of its member instances that are
     * not terminating/terminated/shutting-down. Empty map for empty input.
     * NOTE(review): DescribeAutoScalingGroups results are not paged here -
     * assumes all requested ASGs fit in one response; confirm for large fleets.
     */
    @Override
    public Map<String, List<String>> getRacMembership(Collection<String> autoScalingGroupNames) {
        if (CollectionUtils.isEmpty(autoScalingGroupNames)) {
            return Collections.emptyMap();
        }

        AmazonAutoScaling client = null;
        try {
            client = getAutoScalingClient();

            DescribeAutoScalingGroupsRequest describeAutoScalingGroupsRequest =
                    new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(autoScalingGroupNames);
            DescribeAutoScalingGroupsResult describeAutoScalingGroupsResult = client.describeAutoScalingGroups(describeAutoScalingGroupsRequest);

            Map<String, List<String>> asgs = new HashMap<>();

            for (AutoScalingGroup autoScalingGroup : describeAutoScalingGroupsResult.getAutoScalingGroups()) {
                List<String> asgInstanceIds = Lists.newArrayList();

                for (Instance asgInstance : autoScalingGroup.getInstances()) {
                    // Skip instances on their way out of the group
                    if (!(asgInstance.getLifecycleState().equalsIgnoreCase("terminating") ||
                            asgInstance.getLifecycleState().equalsIgnoreCase("shutting-down") ||
                            asgInstance.getLifecycleState().equalsIgnoreCase("terminated"))) {
                        asgInstanceIds.add(asgInstance.getInstanceId());
                    }
                }

                asgs.put(autoScalingGroup.getAutoScalingGroupName(), asgInstanceIds);

                logger.info("AWS returned the following instance ID's for {} ASG: {}",
                        autoScalingGroup.getAutoScalingGroupName(),
                        StringUtils.join(asgInstanceIds, ","));
            }

            return asgs;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * Actual membership AWS source of truth...
     * Returns the sum of the MAX sizes of the configured ASG(s) - i.e. the
     * capacity ceiling, not the count of currently running instances.
     */
    @Override
    public int getRacMembershipSize() {
        AmazonAutoScaling client = null;
        try {
            client = getAutoScalingClient();
            DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(config.getASGName());
            DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);

            int size = 0;
            for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
                size += asg.getMaxSize();
            }

            logger.info(String.format("Query on ASG returning %d instances", size));

            return size;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /** Number of configured racks (availability zones). */
    @Override
    public int getRacCount() {
        return config.getRacs().size();
    }

    /**
     * Adds a list of IP's to the SG
     * Grants TCP ingress for the given CIDR ranges on ports [from, to] to the
     * configured security group (by group id in VPC, by group name otherwise).
     */
    public void addACL(Collection<String> listIPs, int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();

            List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
            ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));

            if (config.isDeployedInVPC()) {
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }

                client.authorizeSecurityGroupIngress(
                        new AuthorizeSecurityGroupIngressRequest()
                                .withGroupId(config.getACLGroupIdForVPC())
                                .withIpPermissions(ipPermissions));
            } else {
                client.authorizeSecurityGroupIngress(
                        new AuthorizeSecurityGroupIngressRequest(config.getACLGroupName(), ipPermissions));
            }

            logger.info("Added " + StringUtils.join(listIPs, ",") + " to ACL");
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * Removes a list of IP's from the SG
     * Mirror of {@link #addACL}: revokes the same TCP ingress rule.
     */
    public void removeACL(Collection<String> listIPs, int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();

            List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
            ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));

            if (config.isDeployedInVPC()) {
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }

                client.revokeSecurityGroupIngress(
                        new RevokeSecurityGroupIngressRequest()
                                .withGroupId(config.getACLGroupIdForVPC())
                                .withIpPermissions(ipPermissions));
            } else {
                client.revokeSecurityGroupIngress(
                        new RevokeSecurityGroupIngressRequest(config.getACLGroupName(), ipPermissions));
            }

            logger.info("Removed " + StringUtils.join(listIPs, ",") + " from ACL");
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * List SG ACL's
     * Returns the IP ranges of every ingress rule on the configured security
     * group whose port range is exactly [from, to].
     */
    public List<String> listACL(int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();

            List<String> ipPermissions = new ArrayList<String>();

            DescribeSecurityGroupsResult result;
            if (config.isDeployedInVPC()) {
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }

                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupIds(config.getACLGroupIdForVPC());
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            } else {
                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupNames(Arrays.asList(config.getACLGroupName()));
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            }

            for (SecurityGroup group : result.getSecurityGroups()) {
                for (IpPermission perm : group.getIpPermissions()) {
                    if (perm.getFromPort() == from && perm.getToPort() == to) {
                        ipPermissions.addAll(perm.getIpRanges());
                    }
                }
            }

            return ipPermissions;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * Looks up the [fromPort, toPort] pair(s) for a specific CIDR range on the
     * configured security group. Result maps the given CIDR to a 2-element list
     * [from, to]; empty map when the CIDR has no matching rule.
     */
    public Map<String, List<Integer>> getACLPortMap(String acl) {
        AmazonEC2 client = null;

        Map<String, List<Integer>> aclPortMap = new HashMap<String, List<Integer>>();

        try {
            client = getEc2Client();

            DescribeSecurityGroupsResult result;

            if (config.isDeployedInVPC()) {
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }

                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupIds(config.getACLGroupIdForVPC());
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            } else {
                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupNames(Arrays.asList(config.getACLGroupName()));
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            }

            for (SecurityGroup group : result.getSecurityGroups()) {
                for (IpPermission perm : group.getIpPermissions()) {
                    for (String ipRange : perm.getIpRanges()) {
                        // If given ACL matches from the list of IP ranges then look for "from" and "to" ports
                        if (acl.equalsIgnoreCase(ipRange)) {
                            List<Integer> fromToList = new ArrayList<Integer>();
                            fromToList.add(perm.getFromPort());
                            fromToList.add(perm.getToPort());

                            logger.info("ACL: {}, from: {}, to: {}", acl, perm.getFromPort(), perm.getToPort());

                            aclPortMap.put(acl, fromToList);
                        }
                    }
                }
            }

            return aclPortMap;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * Grows the configured ASG by one instance (min, max and desired capacity
     * are all set to current min + 1).
     * NOTE(review): the {@code count} parameter is ignored - growth is always
     * exactly one; confirm whether callers rely on larger expansions.
     */
    @Override
    public void expandRacMembership(int count) {
        AmazonAutoScaling client = null;

        try {
            client = getAutoScalingClient();

            DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(config.getASGName());
            DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
            AutoScalingGroup asg = res.getAutoScalingGroups().get(0);
            UpdateAutoScalingGroupRequest ureq = new UpdateAutoScalingGroupRequest();
            ureq.setAutoScalingGroupName(asg.getAutoScalingGroupName());
            ureq.setMinSize(asg.getMinSize() + 1);
            ureq.setMaxSize(asg.getMinSize() + 1);
            ureq.setDesiredCapacity(asg.getMinSize() + 1);
            client.updateAutoScalingGroup(ureq);
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /** Builds a region-scoped AutoScaling client; callers must shutdown() it. */
    protected AmazonAutoScaling getAutoScalingClient() {
        AmazonAutoScaling client = new AmazonAutoScalingClient(provider.getAwsCredentialProvider());
        client.setEndpoint("autoscaling." + config.getDC() + ".amazonaws.com");
        return client;
    }

    /** Builds a region-scoped EC2 client; callers must shutdown() it. */
    protected AmazonEC2 getEc2Client() {
        AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider());
        client.setEndpoint("ec2." + config.getDC() + ".amazonaws.com");
        return client;
    }
}
// ===== com/netflix/raigad/aws/SetVPCSecurityGroupID.java =====
/**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.aws;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest;
import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult;
import com.amazonaws.services.ec2.model.SecurityGroup;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.utils.SystemUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Created by sloke on 11/16/15.
* This class has been added especially for VPC Purposes. If SecurityGroup is deployed in VPC,
* then SecurityGroupId is needed to make any modifications or querying to associated SecurityGroup
*
* Sets the Security Group Id for the VPC Security Group
* If SecurityGroupId is not found for the matching the Security Group
* then RuntimeException is thrown
* */
@Singleton
public class SetVPCSecurityGroupID {
private static final Logger logger = LoggerFactory.getLogger(SetVPCSecurityGroupID.class);
private final IConfiguration config;
private final ICredential provider;
@Inject
public SetVPCSecurityGroupID(IConfiguration config, ICredential provider) {
this.config = config;
this.provider = provider;
}
public void execute() {
AmazonEC2 client = null;
try {
client = getEc2Client();
//Get All the Existing Sec Group Ids
String[] securityGroupIds = SystemUtils.getSecurityGroupIds(config.getMacIdForInstance());
DescribeSecurityGroupsRequest req = new DescribeSecurityGroupsRequest().withGroupIds(securityGroupIds);
DescribeSecurityGroupsResult result = client.describeSecurityGroups(req);
boolean securityGroupFound = false;
for (SecurityGroup securityGroup : result.getSecurityGroups()) {
logger.info("Read " + securityGroup.getGroupName());
if (securityGroup.getGroupName().equals(config.getACLGroupNameForVPC())) {
logger.info("Found matching security group name: " + securityGroup.getGroupName());
// Setting configuration value with the correct SG ID
config.setACLGroupIdForVPC(securityGroup.getGroupId());
securityGroupFound = true;
break;
}
}
// If correct SG was not found, throw Exception
if (!securityGroupFound) {
throw new RuntimeException("Cannot find matching security group for " + config.getACLGroupNameForVPC());
}
}
catch (Exception e) {
throw new RuntimeException(e);
}
finally {
if (client != null) {
client.shutdown();
}
}
}
private AmazonEC2 getEc2Client() {
AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider());
client.setEndpoint("ec2." + config.getDC() + ".amazonaws.com");
return client;
}
} | 5,551 |
// ===== com/netflix/raigad/aws/UpdateSecuritySettings.java =====
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.aws;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.IMembership;
import com.netflix.raigad.identity.IRaigadInstanceFactory;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.identity.RaigadInstance;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
/**
* This class will associate public IP's with a new instance so they can talk across the regions.
* <p>
* Requirements:
* 1. Nodes in the same region needs to be able to talk to each other.
* 2. Nodes in other regions needs to be able to talk to the others in the other region.
* <p>
* Assumptions:
* 1. IRaigadInstanceFactory will provide the membership and will be visible across the regions
* 2. IMembership amazon or any other implementation which can tell if the instance is a
* part of the group (ASG in Amazon's case).
*/
@Singleton
public class UpdateSecuritySettings extends Task {
private static final Logger logger = LoggerFactory.getLogger(UpdateSecuritySettings.class);
public static final String JOB_NAME = "Update_SG";
public static boolean firstTimeUpdated = false;
private static final Random RANDOM = new Random();
private final IMembership membership;
private final IRaigadInstanceFactory factory;
@Inject
public UpdateSecuritySettings(IConfiguration config, IMembership membership, IRaigadInstanceFactory factory) {
super(config);
this.membership = membership;
this.factory = factory;
}
/**
* Master nodes execute this at the specified interval, others run only on startup
*/
@Override
public void execute() {
int transportPort = config.getTransportTcpPort();
int restPort = config.getHttpPort();
List<String> accessControlLists = membership.listACL(transportPort, transportPort);
// Get instances based on node types (tribe / non-tribe)
List<RaigadInstance> instances = getInstanceList();
// Iterate cluster nodes and build a list of IP's
List<String> ipsToAdd = Lists.newArrayList();
List<String> currentRanges = Lists.newArrayList();
for (RaigadInstance instance : instances) {
String range = instance.getHostIP() + "/32";
currentRanges.add(range);
if (!accessControlLists.contains(range)) {
ipsToAdd.add(range);
}
}
if (ipsToAdd.size() > 0) {
logger.info("Adding IPs on ports {} and {}: {}", transportPort, restPort, ipsToAdd);
membership.addACL(ipsToAdd, transportPort, transportPort);
membership.addACL(ipsToAdd, restPort, restPort);
firstTimeUpdated = true;
}
// Create a list of IP's to remove
List<String> ipsToRemove = Lists.newArrayList();
for (String accessControlList : accessControlLists) {
// Remove if not found
if (!currentRanges.contains(accessControlList)) {
ipsToRemove.add(accessControlList);
}
}
if (ipsToRemove.size() > 0) {
logger.info("Removing IPs on ports {} and {}: {}", transportPort, restPort, ipsToRemove);
membership.removeACL(ipsToRemove, transportPort, transportPort);
membership.removeACL(ipsToRemove, restPort, restPort);
firstTimeUpdated = true;
}
}
private List<RaigadInstance> getInstanceList() {
List<RaigadInstance> instances = new ArrayList<>();
List<String> tribeClusters = new ArrayList<String>(Arrays.asList(StringUtils.split(config.getCommaSeparatedTribeClusterNames(), ",")));
assert (tribeClusters.size() != 0) : "Need at least one tribe cluster";
tribeClusters.forEach(tribeClusterName -> instances.addAll(factory.getAllIds(tribeClusterName)));
if (config.isDebugEnabled()) {
instances.forEach(instance -> logger.debug(instance.toString()));
}
return instances;
}
public static TaskTimer getTimer(InstanceManager instanceManager) {
// Only master nodes will update security group settings
if (!instanceManager.isMaster()) {
return new SimpleTimer(JOB_NAME);
} else {
return new SimpleTimer(JOB_NAME, 120 * 1000 + RANDOM.nextInt(120 * 1000));
}
}
@Override
public String getName() {
return JOB_NAME;
}
} | 5,552 |
// ===== com/netflix/raigad/aws/ICredential.java =====
/**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.aws;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.google.inject.ImplementedBy;
/**
* Credential file interface for services supporting
* Access ID and key authentication
*/
@ImplementedBy(ClearCredential.class)
public interface ICredential {
    /**
     * @return the AWS credentials provider used to authenticate AWS SDK clients
     */
    AWSCredentialsProvider getAwsCredentialProvider();
}
| 5,553 |
// ===== com/netflix/raigad/aws/UpdateTribeSecuritySettings.java =====
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.aws;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.IMembership;
import com.netflix.raigad.identity.IRaigadInstanceFactory;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.identity.RaigadInstance;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* This class will associate public IP's with a new instance so they can talk across the regions.
* <p>
* Requirements:
* (1) Nodes in the same region needs to be able to talk to each other.
* (2) Nodes in other regions needs to be able to talk to the others in the other region.
* <p>
* Assumptions:
* (1) IRaigadInstanceFactory will provide the membership and will be visible across the regions
* (2) IMembership Amazon or any other implementation which can tell if the instance
* is part of the group (ASG in Amazon's case).
*/
@Singleton
public class UpdateTribeSecuritySettings extends Task {
private static final Logger logger = LoggerFactory.getLogger(UpdateTribeSecuritySettings.class);
public static final String JOB_NAME = "Update_TRIBE_SG";
public static boolean firstTimeUpdated = false;
private static final String COMMA_SEPARATOR = ",";
private static final String PARAM_SEPARATOR = "=";
private static final Random ran = new Random();
private final IMembership membership;
private final IRaigadInstanceFactory factory;
/**
* clusterPortMap
* es_tribe : 8000
* es_tribe_source1 : 8001
* es_tribe_source2 : 8002
*/
private final Map<String, Integer> clusterPortMap = new HashMap<String, Integer>();
@Inject
public UpdateTribeSecuritySettings(IConfiguration config, IMembership membership, IRaigadInstanceFactory factory) {
super(config);
this.membership = membership;
this.factory = factory;
}
/**
* Master nodes execute this at the specified interval.
* Other nodes run only on startup.
*/
@Override
public void execute() {
// Initializing cluster-port map from config properties
initializeClusterPortMap();
List<String> accessControlLists = new ArrayList<>();
for (String clusterName : clusterPortMap.keySet()) {
List<String> aclList = membership.listACL(clusterPortMap.get(clusterName), clusterPortMap.get(clusterName));
accessControlLists.addAll(aclList);
}
List<RaigadInstance> instances = getInstanceList();
Map<String, String> addAclClusterMap = new HashMap<>();
Map<String, String> currentIpClusterMap = new HashMap<>();
for (RaigadInstance instance : instances) {
String range = instance.getHostIP() + "/32";
if (!accessControlLists.contains(range)) {
addAclClusterMap.put(range, instance.getApp());
}
// Just generating ranges
currentIpClusterMap.put(range, instance.getApp());
}
if (addAclClusterMap.keySet().size() > 0) {
/**
* clusterInstancesMap
* es_tribe : 50.60.70.80,50.60.70.81
* es_tribe_source1 : 60.70.80.90,60.70.80.91
* es_tribe_source2 : 70.80.90.00,70.80.90.01
*/
Map<String, List<String>> clusterInstancesMap = generateClusterToAclListMap(addAclClusterMap);
for (String currentClusterName : clusterInstancesMap.keySet()) {
if (currentClusterName.startsWith("es_tribe_")) {
clusterPortMap.forEach((clusterName, transportPort) -> {
logger.info("Adding IPs for {} on port {}: {}", currentClusterName, transportPort, clusterInstancesMap.get(currentClusterName));
membership.addACL(clusterInstancesMap.get(currentClusterName), transportPort, transportPort);
});
} else {
logger.info("Adding IPs for {} on port {}: {}", currentClusterName, clusterPortMap.get(currentClusterName), clusterInstancesMap.get(currentClusterName));
membership.addACL(clusterInstancesMap.get(currentClusterName), clusterPortMap.get(currentClusterName), clusterPortMap.get(currentClusterName));
}
}
firstTimeUpdated = true;
}
// Iterating to remove ACL's
List<String> removeAclList = new ArrayList<>();
for (String acl : accessControlLists) {
if (!currentIpClusterMap.containsKey(acl)) {
removeAclList.add(acl);
}
}
if (removeAclList.size() > 0) {
for (String acl : removeAclList) {
Map<String, List<Integer>> aclPortMap = membership.getACLPortMap(acl);
int from = aclPortMap.get(acl).get(0);
int to = aclPortMap.get(acl).get(1);
membership.removeACL(Collections.singletonList(acl), from, to);
}
firstTimeUpdated = true;
}
}
private void initializeClusterPortMap() {
// Adding existing cluster-port mapping
if (!clusterPortMap.containsKey(config.getAppName())) {
clusterPortMap.put(config.getAppName(), config.getTransportTcpPort());
logger.info("Adding cluster [{}:{}]", config.getAppName(), config.getTransportTcpPort());
}
String clusterParams = config.getCommaSeparatedSourceClustersForTribeNode();
assert (clusterParams != null) : "Clusters parameters cannot be null";
String[] clusters = StringUtils.split(clusterParams.trim(), COMMA_SEPARATOR);
assert (clusters.length != 0) : "At least one cluster is needed";
//Common settings
for (String cluster : clusters) {
String[] clusterPort = cluster.trim().split(PARAM_SEPARATOR);
assert (clusterPort.length != 2) : "Cluster name or transport port is missing in configuration";
if (!clusterPortMap.containsKey(clusterPort[0].trim())) {
String sourceTribeClusterName = clusterPort[0].trim();
Integer sourceTribeClusterPort = Integer.parseInt(clusterPort[1].trim());
clusterPortMap.put(sourceTribeClusterName, sourceTribeClusterPort);
logger.info("Adding cluster [{}:{}]", sourceTribeClusterName, sourceTribeClusterPort);
}
}
}
private Map<String, List<String>> generateClusterToAclListMap(Map<String, String> addAclClusterMap) {
Map<String, List<String>> clusterAclsMap = new HashMap<>();
for (String acl : addAclClusterMap.keySet()) {
if (clusterAclsMap.containsKey(addAclClusterMap.get(acl))) {
clusterAclsMap.get(addAclClusterMap.get(acl)).add(acl);
} else {
List<String> aclList = new ArrayList<>();
aclList.add(acl);
clusterAclsMap.put(addAclClusterMap.get(acl), aclList);
}
}
return clusterAclsMap;
}
private List<RaigadInstance> getInstanceList() {
List<RaigadInstance> instances = new ArrayList<>();
for (String clusterName : clusterPortMap.keySet()) {
instances.addAll(factory.getAllIds(clusterName));
}
if (config.isDebugEnabled()) {
for (RaigadInstance instance : instances) {
logger.debug(instance.toString());
}
}
return instances;
}
public static TaskTimer getTimer(InstanceManager instanceManager) {
return new SimpleTimer(JOB_NAME, 120 * 1000 + ran.nextInt(120 * 1000));
}
@Override
public String getName() {
return JOB_NAME;
}
} | 5,554 |
// ===== com/netflix/raigad/aws/IAMCredential.java =====
/**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.aws;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
public class IAMCredential implements ICredential
{
private final InstanceProfileCredentialsProvider iamCredProvider;
public IAMCredential()
{
this.iamCredProvider = new InstanceProfileCredentialsProvider();
}
public AWSCredentialsProvider getAwsCredentialProvider()
{
return iamCredProvider;
}
}
| 5,555 |
// ===== com/netflix/raigad/dataobjects/MasterNodeInformation.java =====
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.dataobjects;
import org.codehaus.jackson.annotate.JsonCreator;
import org.codehaus.jackson.annotate.JsonProperty;
/*
[{
"id":"8sZZWYmmQaeNUKMq1S1uow",
"host":"es-slokemsd-useast1d-master-i-9e1b62b4",
"ip":"10.218.89.139",
"node":"us-east-1d.i-9e1b62b4"
}]
*/
/**
 * Immutable value object for a single entry of the master-node JSON payload
 * (see the example above). Deserialized by Jackson via the annotated constructor.
 */
public class MasterNodeInformation {
    private final String id;
    private final String host;
    private final String ip;
    private final String node;

    @JsonCreator
    public MasterNodeInformation(@JsonProperty("id") final String id,
                                 @JsonProperty("host") final String host,
                                 @JsonProperty("ip") final String ip,
                                 @JsonProperty("node") final String node) {
        this.id = id;
        this.host = host;
        this.ip = ip;
        this.node = node;
    }

    public String getId() { return id; }

    public String getHost() { return host; }

    public String getIp() { return ip; }

    public String getNode() { return node; }
}
| 5,556 |
// ===== com/netflix/raigad/objectmapper/DefaultMasterNodeInfoMapper.java =====
/**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.objectmapper;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.Version;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
import org.codehaus.jackson.map.module.SimpleModule;
/**
 * ObjectMapper preconfigured for deserializing master-node info payloads:
 * lenient about unknown JSON properties and empty beans.
 */
public class DefaultMasterNodeInfoMapper extends ObjectMapper
{
    public DefaultMasterNodeInfoMapper() {
        this(null);
    }

    public DefaultMasterNodeInfoMapper(JsonFactory factory) {
        super(factory);
        // Module slot reserved for custom serializers
        SimpleModule defaultSerializers = new SimpleModule("default serializers", new Version(1, 0, 0, null));
        registerModule(defaultSerializers);
        // Ignore JSON fields we do not model rather than failing
        configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        // Allow serializing beans that expose no properties
        configure(SerializationConfig.Feature.FAIL_ON_EMPTY_BEANS, false);
    }
}
| 5,557 |
// ===== com/netflix/raigad/objectmapper/DefaultIndexMapper.java =====
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.objectmapper;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.Version;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
import org.codehaus.jackson.map.module.SimpleModule;
/**
 * ObjectMapper preconfigured for index metadata: lenient on unknown JSON
 * properties, with getter/field auto-detection and pretty-printing turned off
 * so only explicitly annotated properties are serialized, compactly.
 */
public class DefaultIndexMapper extends ObjectMapper {
    public DefaultIndexMapper() {
        this(null);
    }

    public DefaultIndexMapper(JsonFactory factory) {
        super(factory);
        // Module slot reserved for custom serializers
        SimpleModule defaultSerializers = new SimpleModule("default serializers", new Version(1, 0, 0, null));
        registerModule(defaultSerializers);
        // Ignore JSON fields we do not model rather than failing
        configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        // Serialize only explicitly annotated properties, without indentation
        configure(SerializationConfig.Feature.AUTO_DETECT_GETTERS, false);
        configure(SerializationConfig.Feature.AUTO_DETECT_FIELDS, false);
        configure(SerializationConfig.Feature.INDENT_OUTPUT, false);
    }
}
| 5,558 |
// ===== com/netflix/raigad/monitoring/ThreadPoolStatsMonitor.java =====
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.threadpool.ThreadPoolStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class ThreadPoolStatsMonitor extends Task {
private static final Logger logger = LoggerFactory.getLogger(ThreadPoolStatsMonitor.class);
public static final String METRIC_NAME = "Elasticsearch_ThreadPoolMonitor";
private final Elasticsearch_ThreadPoolStatsReporter tpStatsReporter;
@Inject
public ThreadPoolStatsMonitor(IConfiguration config) {
super(config);
tpStatsReporter = new Elasticsearch_ThreadPoolStatsReporter();
Monitors.registerObject(tpStatsReporter);
}
@Override
public void execute() throws Exception {
// If Elasticsearch is started then only start the monitoring
if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
String exceptionMsg = "Elasticsearch is not yet started, check back again later";
logger.info(exceptionMsg);
return;
}
ThreadPoolStatsBean threadPoolStatsBean = new ThreadPoolStatsBean();
try {
NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
NodeStats nodeStats = null;
List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
if (nodeStatsList.size() > 0) {
nodeStats = nodeStatsList.get(0);
}
if (nodeStats == null) {
logger.info("Thread pool stats are not available (node stats is not available)");
return;
}
ThreadPoolStats threadPoolStats = nodeStats.getThreadPool();
if (threadPoolStats == null) {
logger.info("Thread pool stats are not available");
return;
}
Iterator<ThreadPoolStats.Stats> threadPoolStatsIterator = threadPoolStats.iterator();
while (threadPoolStatsIterator.hasNext()) {
ThreadPoolStats.Stats stat = threadPoolStatsIterator.next();
if (stat.getName().equals("index")) {
threadPoolStatsBean.indexThreads = stat.getThreads();
threadPoolStatsBean.indexQueue = stat.getQueue();
threadPoolStatsBean.indexActive = stat.getActive();
threadPoolStatsBean.indexRejected = stat.getRejected();
threadPoolStatsBean.indexLargest = stat.getLargest();
threadPoolStatsBean.indexCompleted = stat.getCompleted();
} else if (stat.getName().equals("get")) {
threadPoolStatsBean.getThreads = stat.getThreads();
threadPoolStatsBean.getQueue = stat.getQueue();
threadPoolStatsBean.getActive = stat.getActive();
threadPoolStatsBean.getRejected = stat.getRejected();
threadPoolStatsBean.getLargest = stat.getLargest();
threadPoolStatsBean.getCompleted = stat.getCompleted();
} else if (stat.getName().equals("search")) {
threadPoolStatsBean.searchThreads = stat.getThreads();
threadPoolStatsBean.searchQueue = stat.getQueue();
threadPoolStatsBean.searchActive = stat.getActive();
threadPoolStatsBean.searchRejected = stat.getRejected();
threadPoolStatsBean.searchLargest = stat.getLargest();
threadPoolStatsBean.searchCompleted = stat.getCompleted();
} else if (stat.getName().equals("bulk")) {
threadPoolStatsBean.bulkThreads = stat.getThreads();
threadPoolStatsBean.bulkQueue = stat.getQueue();
threadPoolStatsBean.bulkActive = stat.getActive();
threadPoolStatsBean.bulkRejected = stat.getRejected();
threadPoolStatsBean.bulkLargest = stat.getLargest();
threadPoolStatsBean.bulkCompleted = stat.getCompleted();
}
}
} catch (Exception e) {
logger.warn("Failed to load thread pool stats data", e);
}
tpStatsReporter.threadPoolBean.set(threadPoolStatsBean);
}
public class Elasticsearch_ThreadPoolStatsReporter {
private final AtomicReference<ThreadPoolStatsBean> threadPoolBean;
public Elasticsearch_ThreadPoolStatsReporter() {
threadPoolBean = new AtomicReference<ThreadPoolStatsBean>(new ThreadPoolStatsBean());
}
@Monitor(name = "IndexThreads", type = DataSourceType.GAUGE)
public long getIndexThreads() {
return threadPoolBean.get().indexThreads;
}
@Monitor(name = "IndexQueue", type = DataSourceType.GAUGE)
public long getIndexQueue() {
return threadPoolBean.get().indexQueue;
}
@Monitor(name = "indexActive", type = DataSourceType.GAUGE)
public long getIndexActive() {
return threadPoolBean.get().indexActive;
}
@Monitor(name = "indexRejected", type = DataSourceType.COUNTER)
public long getIndexRejected() {
return threadPoolBean.get().indexRejected;
}
@Monitor(name = "indexLargest", type = DataSourceType.GAUGE)
public long getIndexLargest() {
return threadPoolBean.get().indexLargest;
}
@Monitor(name = "indexCompleted", type = DataSourceType.COUNTER)
public long getIndexCompleted() {
return threadPoolBean.get().indexCompleted;
}
@Monitor(name = "getThreads", type = DataSourceType.GAUGE)
public long getGetThreads() {
return threadPoolBean.get().getThreads;
}
@Monitor(name = "getQueue", type = DataSourceType.GAUGE)
public long getGetQueue() {
return threadPoolBean.get().getQueue;
}
@Monitor(name = "getActive", type = DataSourceType.GAUGE)
public long getGetActive() {
return threadPoolBean.get().getActive;
}
@Monitor(name = "getRejected", type = DataSourceType.COUNTER)
public long getGetRejected() {
return threadPoolBean.get().getRejected;
}
@Monitor(name = "getLargest", type = DataSourceType.GAUGE)
public long getGetLargest() {
return threadPoolBean.get().getLargest;
}
@Monitor(name = "getCompleted", type = DataSourceType.COUNTER)
public long getGetCompleted() {
return threadPoolBean.get().getCompleted;
}
@Monitor(name = "searchThreads", type = DataSourceType.GAUGE)
public long getSearchThreads() {
return threadPoolBean.get().searchThreads;
}
@Monitor(name = "searchQueue", type = DataSourceType.GAUGE)
public long getSearchQueue() {
return threadPoolBean.get().searchQueue;
}
@Monitor(name = "searchActive", type = DataSourceType.GAUGE)
public long getSearchActive() {
return threadPoolBean.get().searchActive;
}
@Monitor(name = "searchRejected", type = DataSourceType.COUNTER)
public long getSearchRejected() {
return threadPoolBean.get().searchRejected;
}
@Monitor(name = "searchLargest", type = DataSourceType.GAUGE)
public long getSearchLargest() {
return threadPoolBean.get().searchLargest;
}
@Monitor(name = "searchCompleted", type = DataSourceType.COUNTER)
public long getSearchCompleted() {
return threadPoolBean.get().searchCompleted;
}
// Servo metrics for the "bulk" thread pool; same pattern as the other pool
// getters — metric names come from the annotations, values are plain reads
// of the latest published snapshot.
@Monitor(name = "bulkThreads", type = DataSourceType.GAUGE)
public long getBulkThreads() {
    return threadPoolBean.get().bulkThreads;
}

@Monitor(name = "bulkQueue", type = DataSourceType.GAUGE)
public long getBulkQueue() {
    return threadPoolBean.get().bulkQueue;
}

@Monitor(name = "bulkActive", type = DataSourceType.GAUGE)
public long getBulkActive() {
    return threadPoolBean.get().bulkActive;
}

@Monitor(name = "bulkRejected", type = DataSourceType.COUNTER)
public long getBulkRejected() {
    return threadPoolBean.get().bulkRejected;
}

@Monitor(name = "bulkLargest", type = DataSourceType.GAUGE)
public long getBulkLargest() {
    return threadPoolBean.get().bulkLargest;
}

@Monitor(name = "bulkCompleted", type = DataSourceType.COUNTER)
public long getBulkCompleted() {
    return threadPoolBean.get().bulkCompleted;
}
}
// Snapshot of per-pool thread pool statistics for the four pools of
// interest (index, get, search, bulk). A fresh instance is built on every
// poll and published atomically through the reporter's AtomicReference;
// fields default to 0 when the stats could not be fetched. The instance is
// never mutated after publication, so readers need no synchronization.
private static class ThreadPoolStatsBean {
    // "index" pool
    private long indexThreads;
    private long indexQueue;
    private long indexActive;
    private long indexRejected;
    private long indexLargest;
    private long indexCompleted;
    // "get" pool
    private long getThreads;
    private long getQueue;
    private long getActive;
    private long getRejected;
    private long getLargest;
    private long getCompleted;
    // "search" pool
    private long searchThreads;
    private long searchQueue;
    private long searchActive;
    private long searchRejected;
    private long searchLargest;
    private long searchCompleted;
    // "bulk" pool
    private long bulkThreads;
    private long bulkQueue;
    private long bulkActive;
    private long bulkRejected;
    private long bulkLargest;
    private long bulkCompleted;
}
/**
 * Builds the scheduler timer that drives this task.
 *
 * @param name timer name used by the Raigad scheduler
 * @return a fixed-interval timer firing once per minute
 */
public static TaskTimer getTimer(String name) {
    // Poll once a minute; int to match SimpleTimer's interval parameter.
    final int oneMinuteInMillis = 60 * 1000;
    return new SimpleTimer(name, oneMinuteInMillis);
}
// Task identifier used by the Raigad scheduler to register and look up
// this monitor; fixed to the class-level METRIC_NAME constant.
@Override
public String getName() {
    return METRIC_NAME;
}
}
| 5,559 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/OsStatsMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.monitor.os.OsStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class OsStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(OsStatsMonitor.class);

    // Scheduler task name; also returned by getName().
    public static final String METRIC_NAME = "Elasticsearch_OsStatsMonitor";

    // Servo-registered reporter holding the latest published stats snapshot.
    private final Elasticsearch_OsStatsReporter osStatsReporter;

    @Inject
    public OsStatsMonitor(IConfiguration config) {
        super(config);
        osStatsReporter = new Elasticsearch_OsStatsReporter();
        // Registration exposes every @Monitor-annotated getter of the reporter to Servo.
        Monitors.registerObject(osStatsReporter);
    }

    /**
     * Polls OS-level stats (memory, CPU, swap) of the local Elasticsearch node
     * via the transport client and atomically publishes a fresh snapshot to the
     * Servo reporter. If the fetch fails, a zeroed snapshot is published and a
     * warning is logged (best-effort monitoring; the task never throws here).
     */
    @Override
    public void execute() throws Exception {
        // If Elasticsearch is started then only start the monitoring
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        OsStatsBean osStatsBean = new OsStatsBean();

        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);

            // Only the first node entry is inspected — presumably the response
            // covers the local node only; confirm against the transport client.
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();

            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }

            if (nodeStats == null) {
                logger.info("OS stats is not available (node stats is not available)");
                return;
            }

            OsStats osStats = nodeStats.getOs();

            if (osStats == null) {
                logger.info("OS stats is not available");
                return;
            }

            //Memory
            osStatsBean.freeInBytes = osStats.getMem().getFree().getBytes();
            osStatsBean.usedInBytes = osStats.getMem().getUsed().getBytes();
            // NOTE(review): the "actual" values simply mirror free/used above;
            // presumably kept so the actual_* metric names stay populated after
            // the ES API dropped separate "actual" numbers — confirm.
            osStatsBean.actualFreeInBytes = osStats.getMem().getFree().getBytes();
            osStatsBean.actualUsedInBytes = osStats.getMem().getUsed().getBytes();
            osStatsBean.freePercent = osStats.getMem().getFreePercent();
            osStatsBean.usedPercent = osStats.getMem().getUsedPercent();

            //CPU
            // NOTE(review): cpuSys is assigned the overall CPU percent; the
            // per-mode breakdown (user/idle/stolen) is not populated from this
            // API, so those three gauges are hard-coded to 0 below.
            osStatsBean.cpuSys = osStats.getCpu().getPercent();
            osStatsBean.cpuUser = 0;
            osStatsBean.cpuIdle = 0;
            osStatsBean.cpuStolen = 0;

            //Swap
            osStatsBean.swapFreeInBytes = osStats.getSwap().getFree().getBytes();
            osStatsBean.swapUsedInBytes = osStats.getSwap().getUsed().getBytes();

            //Uptime
            // Not populated from this API; the uptime_in_millis gauge always reports 0.
            osStatsBean.uptimeInMillis = 0;

            //Timestamp
            osStatsBean.osTimestamp = osStats.getTimestamp();
        } catch (Exception e) {
            // Best-effort: log and fall through, publishing the (zeroed) bean.
            logger.warn("Failed to load OS stats data", e);
        }

        osStatsReporter.osStatsBean.set(osStatsBean);
    }

    /**
     * Servo reporter for OS stats. Metric names come from the @Monitor
     * annotations (not the Java method names); each getter reads the most
     * recently published immutable snapshot, so no locking is needed.
     */
    public class Elasticsearch_OsStatsReporter {
        private final AtomicReference<OsStatsBean> osStatsBean;

        public Elasticsearch_OsStatsReporter() {
            osStatsBean = new AtomicReference<OsStatsBean>(new OsStatsBean());
        }

        @Monitor(name = "free_in_bytes", type = DataSourceType.GAUGE)
        public long getFreeInBytes() {
            return osStatsBean.get().freeInBytes;
        }

        @Monitor(name = "used_in_bytes", type = DataSourceType.GAUGE)
        public long getUsedInBytes() {
            return osStatsBean.get().usedInBytes;
        }

        @Monitor(name = "actual_free_in_bytes", type = DataSourceType.GAUGE)
        public long getActualFreeInBytes() {
            return osStatsBean.get().actualFreeInBytes;
        }

        // NOTE(review): method name has a typo ("ge" instead of "get"). It is
        // harmless because Servo publishes under the annotation name, but the
        // method is public, so renaming it is a (small) API change — do it in
        // a dedicated cleanup if desired.
        @Monitor(name = "actual_used_in_bytes", type = DataSourceType.GAUGE)
        public long geActualUsedInBytes() {
            return osStatsBean.get().actualUsedInBytes;
        }

        @Monitor(name = "free_percent", type = DataSourceType.GAUGE)
        public short getFreePercent() {
            return osStatsBean.get().freePercent;
        }

        @Monitor(name = "used_percent", type = DataSourceType.GAUGE)
        public short getUsedPercent() {
            return osStatsBean.get().usedPercent;
        }

        // Holds the overall CPU percent (see execute()); name kept for
        // metric-name compatibility.
        @Monitor(name = "cpu_sys", type = DataSourceType.GAUGE)
        public short getCpuSys() {
            return osStatsBean.get().cpuSys;
        }

        // The next three gauges always report 0 (not populated; see execute()).
        @Monitor(name = "cpu_user", type = DataSourceType.GAUGE)
        public short getCpuUser() {
            return osStatsBean.get().cpuUser;
        }

        @Monitor(name = "cpu_idle", type = DataSourceType.GAUGE)
        public short getCpuIdle() {
            return osStatsBean.get().cpuIdle;
        }

        @Monitor(name = "cpu_stolen", type = DataSourceType.GAUGE)
        public short getCpuStolen() {
            return osStatsBean.get().cpuStolen;
        }

        @Monitor(name = "swap_used_in_bytes", type = DataSourceType.GAUGE)
        public long getSwapUsedInBytes() {
            return osStatsBean.get().swapUsedInBytes;
        }

        @Monitor(name = "swap_free_in_bytes", type = DataSourceType.GAUGE)
        public long getSwapFreeInBytes() {
            return osStatsBean.get().swapFreeInBytes;
        }

        // Always 0 (not populated; see execute()). Returns double although the
        // backing field is long — a harmless widening conversion.
        @Monitor(name = "uptime_in_millis", type = DataSourceType.GAUGE)
        public double getUptimeInMillis() {
            return osStatsBean.get().uptimeInMillis;
        }

        @Monitor(name = "os_timestamp", type = DataSourceType.GAUGE)
        public long getOsTimestamp() {
            return osStatsBean.get().osTimestamp;
        }
    }

    // Snapshot of the OS stats for one poll; built fresh each cycle and
    // published atomically, never mutated afterwards. Fields default to 0
    // when the stats fetch fails.
    private static class OsStatsBean {
        private long freeInBytes;
        private long usedInBytes;
        private long actualFreeInBytes;
        private long actualUsedInBytes;
        private short freePercent;
        private short usedPercent;
        private short cpuSys;
        private short cpuUser;
        private short cpuIdle;
        private short cpuStolen;
        private long swapUsedInBytes;
        private long swapFreeInBytes;
        private long uptimeInMillis;
        private long osTimestamp;
    }

    // Scheduler hook: this task runs once per minute.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
| 5,560 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/JvmStatsMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class JvmStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(JvmStatsMonitor.class);

    // Scheduler task name; also returned by getName().
    public static final String METRIC_NAME = "Elasticsearch_JvmStatsMonitor";

    // Names used by Elasticsearch for the memory pools and GC collectors;
    // matched case-insensitively against the stats API responses.
    public static final String GC_YOUNG_TAG = "young";
    public static final String GC_OLD_TAG = "old";
    public static final String GC_SURVIVOR_TAG = "survivor";

    // Servo-registered reporter holding the latest published stats snapshot.
    private final Elasticsearch_JvmStatsReporter jvmStatsReporter;

    @Inject
    public JvmStatsMonitor(IConfiguration config) {
        super(config);
        jvmStatsReporter = new Elasticsearch_JvmStatsReporter();
        // Registration exposes every @Monitor-annotated getter of the reporter to Servo.
        Monitors.registerObject(jvmStatsReporter);
    }

    /**
     * Polls JVM stats (heap, memory pools, threads, GC) of the local
     * Elasticsearch node via the transport client and atomically publishes a
     * fresh snapshot to the Servo reporter. On any failure a zeroed snapshot
     * is published and a warning is logged.
     */
    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        JvmStatsBean jvmStatsBean = new JvmStatsBean();

        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);

            // Only the first node entry is inspected — presumably the response
            // covers the local node only; confirm against the transport client.
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();

            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }

            if (nodeStats == null) {
                logger.info("JVM stats is not available (node stats is not available)");
                return;
            }

            JvmStats jvmStats = nodeStats.getJvm();

            if (jvmStats == null) {
                logger.info("JVM stats is not available");
                return;
            }

            //Heap
            // NOTE(review): these six fields are populated from getMb() even
            // though the field and metric names say "in bytes" — the published
            // values are megabytes. Likely a bug; fixing it would rescale the
            // metrics by 2^20, so confirm with dashboard/alert owners before
            // switching to getBytes().
            jvmStatsBean.heapCommittedInBytes = jvmStats.getMem().getHeapCommitted().getMb();
            jvmStatsBean.heapMaxInBytes = jvmStats.getMem().getHeapMax().getMb();
            jvmStatsBean.heapUsedInBytes = jvmStats.getMem().getHeapUsed().getMb();
            jvmStatsBean.heapUsedPercent = jvmStats.getMem().getHeapUsedPercent();
            jvmStatsBean.nonHeapCommittedInBytes = jvmStats.getMem().getNonHeapCommitted().getMb();
            jvmStatsBean.nonHeapUsedInBytes = jvmStats.getMem().getNonHeapUsed().getMb();

            // Per-pool memory stats, matched by pool name (young/survivor/old).
            // These ARE in bytes (getBytes()), unlike the heap totals above.
            Iterator<JvmStats.MemoryPool> memoryPoolIterator = jvmStats.getMem().iterator();

            while (memoryPoolIterator.hasNext()) {
                JvmStats.MemoryPool memoryPoolStats = memoryPoolIterator.next();

                if (memoryPoolStats.getName().equalsIgnoreCase(GC_YOUNG_TAG)) {
                    jvmStatsBean.youngMaxInBytes = memoryPoolStats.getMax().getBytes();
                    jvmStatsBean.youngUsedInBytes = memoryPoolStats.getUsed().getBytes();
                    jvmStatsBean.youngPeakUsedInBytes = memoryPoolStats.getPeakUsed().getBytes();
                    jvmStatsBean.youngPeakMaxInBytes = memoryPoolStats.getPeakMax().getBytes();
                } else if (memoryPoolStats.getName().equalsIgnoreCase(GC_SURVIVOR_TAG)) {
                    jvmStatsBean.survivorMaxInBytes = memoryPoolStats.getMax().getBytes();
                    jvmStatsBean.survivorUsedInBytes = memoryPoolStats.getUsed().getBytes();
                    jvmStatsBean.survivorPeakUsedInBytes = memoryPoolStats.getPeakUsed().getBytes();
                    jvmStatsBean.survivorPeakMaxInBytes = memoryPoolStats.getPeakMax().getBytes();
                } else if (memoryPoolStats.getName().equalsIgnoreCase(GC_OLD_TAG)) {
                    jvmStatsBean.oldMaxInBytes = memoryPoolStats.getMax().getBytes();
                    jvmStatsBean.oldUsedInBytes = memoryPoolStats.getUsed().getBytes();
                    jvmStatsBean.oldPeakUsedInBytes = memoryPoolStats.getPeakUsed().getBytes();
                    jvmStatsBean.oldPeakMaxInBytes = memoryPoolStats.getPeakMax().getBytes();
                }
            }

            //Threads
            jvmStatsBean.threadCount = jvmStats.getThreads().getCount();
            jvmStatsBean.threadPeakCount = jvmStats.getThreads().getPeakCount();
            jvmStatsBean.uptimeHours = jvmStats.getUptime().getHours();

            //GC
            // Cumulative collection counts/times per collector, matched by name.
            for (JvmStats.GarbageCollector garbageCollector : jvmStats.getGc().getCollectors()) {
                if (garbageCollector.getName().equalsIgnoreCase(GC_YOUNG_TAG)) {
                    jvmStatsBean.youngCollectionCount = garbageCollector.getCollectionCount();
                    jvmStatsBean.youngCollectionTimeInMillis = garbageCollector.getCollectionTime().getMillis();
                } else if (garbageCollector.getName().equalsIgnoreCase(GC_OLD_TAG)) {
                    jvmStatsBean.oldCollectionCount = garbageCollector.getCollectionCount();
                    jvmStatsBean.oldCollectionTimeInMillis = garbageCollector.getCollectionTime().getMillis();
                }
            }
            // NOTE(review): the *LastGc* fields of JvmStatsBean are never
            // assigned anywhere in this class, so the twelve *last_gc_* gauges
            // below always report 0 — either populate them or drop them.
        } catch (Exception e) {
            // Best-effort: log and fall through, publishing the (zeroed) bean.
            logger.warn("Failed to load JVM stats data", e);
        }

        jvmStatsReporter.jvmStatsBean.set(jvmStatsBean);
    }

    /**
     * Servo reporter for JVM stats. Metric names come from the @Monitor
     * annotations (not the Java method names); each getter reads the most
     * recently published immutable snapshot, so no locking is needed.
     */
    public class Elasticsearch_JvmStatsReporter {
        private final AtomicReference<JvmStatsBean> jvmStatsBean;

        public Elasticsearch_JvmStatsReporter() {
            jvmStatsBean = new AtomicReference<JvmStatsBean>(new JvmStatsBean());
        }

        // Heap/non-heap totals. See the NOTE in execute(): despite the names,
        // the published values are currently in megabytes, not bytes.
        // (Also note the "Commited" spelling in this method name; harmless
        // since Servo uses the annotation name.)
        @Monitor(name = "heap_committed_in_bytes", type = DataSourceType.GAUGE)
        public long getHeapCommitedInBytes() {
            return jvmStatsBean.get().heapCommittedInBytes;
        }

        @Monitor(name = "heap_max_in_bytes", type = DataSourceType.GAUGE)
        public long getHeapMaxInBytes() {
            return jvmStatsBean.get().heapMaxInBytes;
        }

        @Monitor(name = "heap_used_in_bytes", type = DataSourceType.GAUGE)
        public long getHeapUsedInBytes() {
            return jvmStatsBean.get().heapUsedInBytes;
        }

        @Monitor(name = "non_heap_committed_in_bytes", type = DataSourceType.GAUGE)
        public long getNonHeapCommittedInBytes() {
            return jvmStatsBean.get().nonHeapCommittedInBytes;
        }

        @Monitor(name = "non_heap_used_in_bytes", type = DataSourceType.GAUGE)
        public long getNonHeapUsedInBytes() {
            return jvmStatsBean.get().nonHeapUsedInBytes;
        }

        @Monitor(name = "heap_used_percent", type = DataSourceType.GAUGE)
        public short getHeapUsedPercent() {
            return jvmStatsBean.get().heapUsedPercent;
        }

        // Thread and uptime stats.
        @Monitor(name = "threads_count", type = DataSourceType.GAUGE)
        public long getThreadsCount() {
            return jvmStatsBean.get().threadCount;
        }

        @Monitor(name = "threads_peak_count", type = DataSourceType.GAUGE)
        public long getThreadsPeakCount() {
            return jvmStatsBean.get().threadPeakCount;
        }

        // Returns double although the backing field is long — harmless widening.
        @Monitor(name = "uptime_hours", type = DataSourceType.GAUGE)
        public double getUptimeHours() {
            return jvmStatsBean.get().uptimeHours;
        }

        // Cumulative GC counters (GAUGE here, so the raw cumulative value is
        // published rather than a rate).
        @Monitor(name = "young_collection_count", type = DataSourceType.GAUGE)
        public long getYoungCollectionCount() {
            return jvmStatsBean.get().youngCollectionCount;
        }

        @Monitor(name = "young_collection_time_in_millis", type = DataSourceType.GAUGE)
        public long getYoungCollectionTimeInMillis() {
            return jvmStatsBean.get().youngCollectionTimeInMillis;
        }

        @Monitor(name = "old_collection_count", type = DataSourceType.GAUGE)
        public long getOldCollectionCount() {
            return jvmStatsBean.get().oldCollectionCount;
        }

        @Monitor(name = "old_collection_time_in_millis", type = DataSourceType.GAUGE)
        public long getOldCollectionTimeInMillis() {
            return jvmStatsBean.get().oldCollectionTimeInMillis;
        }

        // Per-pool memory gauges (these values are genuinely in bytes).
        @Monitor(name = "young_used_in_bytes", type = DataSourceType.GAUGE)
        public long getYoungUsedInBytes() {
            return jvmStatsBean.get().youngUsedInBytes;
        }

        @Monitor(name = "young_max_in_bytes", type = DataSourceType.GAUGE)
        public long getYoungMaxInBytes() {
            return jvmStatsBean.get().youngMaxInBytes;
        }

        @Monitor(name = "young_peak_used_in_bytes", type = DataSourceType.GAUGE)
        public long getYoungPeakUsedInBytes() {
            return jvmStatsBean.get().youngPeakUsedInBytes;
        }

        @Monitor(name = "young_peak_max_in_bytes", type = DataSourceType.GAUGE)
        public long getYoungPeakMaxInBytes() {
            return jvmStatsBean.get().youngPeakMaxInBytes;
        }

        @Monitor(name = "survivor_used_in_bytes", type = DataSourceType.GAUGE)
        public long getSurvivorUsedInBytes() {
            return jvmStatsBean.get().survivorUsedInBytes;
        }

        @Monitor(name = "survivor_max_in_bytes", type = DataSourceType.GAUGE)
        public long getSurvivorMaxInBytes() {
            return jvmStatsBean.get().survivorMaxInBytes;
        }

        @Monitor(name = "survivor_peak_used_in_bytes", type = DataSourceType.GAUGE)
        public long getSurvivorPeakUsedInBytes() {
            return jvmStatsBean.get().survivorPeakUsedInBytes;
        }

        @Monitor(name = "survivor_peak_max_in_bytes", type = DataSourceType.GAUGE)
        public long getSurvivorPeakMaxInBytes() {
            return jvmStatsBean.get().survivorPeakMaxInBytes;
        }

        @Monitor(name = "old_used_in_bytes", type = DataSourceType.GAUGE)
        public long getOldUsedInBytes() {
            return jvmStatsBean.get().oldUsedInBytes;
        }

        @Monitor(name = "old_max_in_bytes", type = DataSourceType.GAUGE)
        public long getOldMaxInBytes() {
            return jvmStatsBean.get().oldMaxInBytes;
        }

        @Monitor(name = "old_peak_used_in_bytes", type = DataSourceType.GAUGE)
        public long getOldPeakUsedInBytes() {
            return jvmStatsBean.get().oldPeakUsedInBytes;
        }

        @Monitor(name = "old_peak_max_in_bytes", type = DataSourceType.GAUGE)
        public long getOldPeakMaxInBytes() {
            return jvmStatsBean.get().oldPeakMaxInBytes;
        }

        // NOTE(review): every *last_gc_* gauge below always reports 0 —
        // the backing fields are never populated (see NOTE in execute()).
        @Monitor(name = "young_last_gc_start_time", type = DataSourceType.GAUGE)
        public long getYoungLastGcStartTime() {
            return jvmStatsBean.get().youngLastGcStartTime;
        }

        @Monitor(name = "young_last_gc_end_time", type = DataSourceType.GAUGE)
        public long getYoungLastGcEndTime() {
            return jvmStatsBean.get().youngLastGcEndTime;
        }

        @Monitor(name = "young_last_gc_max_in_bytes", type = DataSourceType.GAUGE)
        public long getYoungLastGcMaxInBytes() {
            return jvmStatsBean.get().youngLastGcMaxInBytes;
        }

        @Monitor(name = "young_last_gc_before_used_in_bytes", type = DataSourceType.GAUGE)
        public long getYoungLastGcBeforeUsedInBytes() {
            return jvmStatsBean.get().youngLastGcBeforeUsedInBytes;
        }

        @Monitor(name = "young_last_gc_after_used_in_bytes", type = DataSourceType.GAUGE)
        public long getYoungLastGcAfterUsedInBytes() {
            return jvmStatsBean.get().youngLastGcAfterUsedInBytes;
        }

        @Monitor(name = "young_last_gc_duration", type = DataSourceType.GAUGE)
        public long getYoungLastGcDuration() {
            return jvmStatsBean.get().youngLastGcDuration;
        }

        @Monitor(name = "old_last_gc_start_time", type = DataSourceType.GAUGE)
        public long getOldLastGcStartTime() {
            return jvmStatsBean.get().oldLastGcStartTime;
        }

        @Monitor(name = "old_last_gc_end_time", type = DataSourceType.GAUGE)
        public long getOldLastGcEndTime() {
            return jvmStatsBean.get().oldLastGcEndTime;
        }

        @Monitor(name = "old_last_gc_max_in_bytes", type = DataSourceType.GAUGE)
        public long getOldLastGcMaxInBytes() {
            return jvmStatsBean.get().oldLastGcMaxInBytes;
        }

        @Monitor(name = "old_last_gc_before_used_in_bytes", type = DataSourceType.GAUGE)
        public long getOldLastGcBeforeUsedInBytes() {
            return jvmStatsBean.get().oldLastGcBeforeUsedInBytes;
        }

        @Monitor(name = "old_last_gc_after_used_in_bytes", type = DataSourceType.GAUGE)
        public long getOldLastGcAfterUsedInBytes() {
            return jvmStatsBean.get().oldLastGcAfterUsedInBytes;
        }

        @Monitor(name = "old_last_gc_duration", type = DataSourceType.GAUGE)
        public long getOldLastGcDuration() {
            return jvmStatsBean.get().oldLastGcDuration;
        }
    }

    // Snapshot of the JVM stats for one poll; built fresh each cycle and
    // published atomically, never mutated afterwards. Fields default to 0
    // when the stats fetch fails.
    private static class JvmStatsBean {
        // Heap/non-heap totals (currently megabytes — see NOTE in execute()).
        private long heapCommittedInBytes;
        private long heapMaxInBytes;
        private long heapUsedInBytes;
        private long nonHeapCommittedInBytes;
        private long nonHeapUsedInBytes;
        private short heapUsedPercent;
        // Threads/uptime.
        private int threadCount;
        private int threadPeakCount;
        private long uptimeHours;
        // Cumulative GC stats.
        private long youngCollectionCount;
        private long youngCollectionTimeInMillis;
        private long oldCollectionCount;
        private long oldCollectionTimeInMillis;
        // Per-pool memory stats (bytes).
        private long youngUsedInBytes;
        private long youngMaxInBytes;
        private long youngPeakUsedInBytes;
        private long youngPeakMaxInBytes;
        private long survivorUsedInBytes;
        private long survivorMaxInBytes;
        private long survivorPeakUsedInBytes;
        private long survivorPeakMaxInBytes;
        private long oldUsedInBytes;
        private long oldMaxInBytes;
        private long oldPeakUsedInBytes;
        private long oldPeakMaxInBytes;
        // Last-GC details — never populated; always 0 (see NOTE in execute()).
        private long youngLastGcStartTime;
        private long youngLastGcEndTime;
        private long youngLastGcMaxInBytes;
        private long youngLastGcBeforeUsedInBytes;
        private long youngLastGcAfterUsedInBytes;
        private long youngLastGcDuration;
        private long oldLastGcStartTime;
        private long oldLastGcEndTime;
        private long oldLastGcMaxInBytes;
        private long oldLastGcBeforeUsedInBytes;
        private long oldLastGcAfterUsedInBytes;
        private long oldLastGcDuration;
    }

    // Scheduler hook: this task runs once per minute.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
| 5,561 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/EstimatedHistogram.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import org.slf4j.Logger;
import java.util.Arrays;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLongArray;
public class EstimatedHistogram {
/**
* The series of values to which the counts in `buckets` correspond:
* 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc.
* Thus, a `buckets` of [0, 0, 1, 10] would mean we had seen one value of 3 and 10 values of 4.
* <p>
* The series starts at 1 and grows by 1.2 each time (rounding and removing duplicates). It goes from 1
* to around 36M by default (creating 90+1 buckets), which will give us timing resolution from microseconds to
* 36 seconds, with less precision as the numbers get larger.
* <p>
* Each bucket represents values from (previous bucket offset, current offset].
*/
private final long[] bucketOffsets;
// buckets is one element longer than bucketOffsets -- the last element is values greater than the last offset
final AtomicLongArray buckets;
public EstimatedHistogram() {
this(90);
}
public EstimatedHistogram(int bucketCount) {
bucketOffsets = newOffsets(bucketCount);
buckets = new AtomicLongArray(bucketOffsets.length + 1);
}
public EstimatedHistogram(long[] offsets, long[] bucketData) {
assert bucketData.length == offsets.length + 1;
bucketOffsets = offsets;
buckets = new AtomicLongArray(bucketData);
}
private static long[] newOffsets(int size) {
long[] result = new long[size];
long last = 1;
result[0] = last;
for (int i = 1; i < size; i++) {
long next = Math.round(last * 1.2);
if (next == last)
next++;
result[i] = next;
last = next;
}
return result;
}
/**
* @return the histogram values corresponding to each bucket index
*/
public long[] getBucketOffsets() {
return bucketOffsets;
}
/**
* Increments the count of the bucket closest to n, rounding UP.
*
* @param n
*/
public void add(long n) {
int index = Arrays.binarySearch(bucketOffsets, n);
if (index < 0) {
// inexact match, take the first bucket higher than n
index = -index - 1;
}
// else exact match; we're good
buckets.incrementAndGet(index);
}
/**
* @return the count in the given bucket
*/
long get(int bucket) {
return buckets.get(bucket);
}
/**
* @param reset zero out buckets afterwards if true
* @return a long[] containing the current histogram buckets
*/
public long[] getBuckets(boolean reset) {
final int len = buckets.length();
long[] rv = new long[len];
if (reset)
for (int i = 0; i < len; i++)
rv[i] = buckets.getAndSet(i, 0L);
else
for (int i = 0; i < len; i++)
rv[i] = buckets.get(i);
return rv;
}
/**
* @return the smallest value that could have been added to this histogram
*/
public long min() {
for (int i = 0; i < buckets.length(); i++) {
if (buckets.get(i) > 0)
return i == 0 ? 0 : 1 + bucketOffsets[i - 1];
}
return 0;
}
/**
* @return the largest value that could have been added to this histogram. If the histogram
* overflowed, returns Long.MAX_VALUE.
*/
public long max() {
int lastBucket = buckets.length() - 1;
if (buckets.get(lastBucket) > 0)
return Long.MAX_VALUE;
for (int i = lastBucket - 1; i >= 0; i--) {
if (buckets.get(i) > 0)
return bucketOffsets[i];
}
return 0;
}
/**
* @param percentile
* @return estimated value at given percentile
*/
public long percentile(double percentile) {
assert percentile >= 0 && percentile <= 1.0;
int lastBucket = buckets.length() - 1;
if (buckets.get(lastBucket) > 0)
throw new IllegalStateException("Unable to compute when histogram overflowed");
long pcount = (long) Math.floor(count() * percentile);
if (pcount == 0)
return 0;
long elements = 0;
for (int i = 0; i < lastBucket; i++) {
elements += buckets.get(i);
if (elements >= pcount)
return bucketOffsets[i];
}
return 0;
}
/**
* @return the mean histogram value (average of bucket offsets, weighted by count)
* @throws IllegalStateException if any values were greater than the largest bucket threshold
*/
public long mean() {
int lastBucket = buckets.length() - 1;
if (buckets.get(lastBucket) > 0)
throw new IllegalStateException("Unable to compute ceiling for max when histogram overflowed");
long elements = 0;
long sum = 0;
for (int i = 0; i < lastBucket; i++) {
long bCount = buckets.get(i);
elements += bCount;
sum += bCount * bucketOffsets[i];
}
return (long) Math.ceil((double) sum / elements);
}
/**
* @return the total number of non-zero values
*/
public long count() {
long sum = 0L;
for (int i = 0; i < buckets.length(); i++)
sum += buckets.get(i);
return sum;
}
/**
* @return true if this histogram has overflowed -- that is, a value larger than our largest bucket could bound was added
*/
public boolean isOverflowed() {
return buckets.get(buckets.length() - 1) > 0;
}
/**
* log.debug() every record in the histogram
*
* @param log
*/
public void log(Logger log) {
// only print overflow if there is any
int nameCount;
if (buckets.get(buckets.length() - 1) == 0)
nameCount = buckets.length() - 1;
else
nameCount = buckets.length();
String[] names = new String[nameCount];
int maxNameLength = 0;
for (int i = 0; i < nameCount; i++) {
names[i] = nameOfRange(bucketOffsets, i);
maxNameLength = Math.max(maxNameLength, names[i].length());
}
// emit log records
String formatstr = "%" + maxNameLength + "s: %d";
for (int i = 0; i < nameCount; i++) {
long count = buckets.get(i);
// sort-of-hack to not print empty ranges at the start that are only used to demarcate the
// first populated range. for code clarity we don't omit this record from the maxNameLength
// calculation, and accept the unnecessary whitespace prefixes that will occasionally occur
if (i == 0 && count == 0)
continue;
log.debug(String.format(formatstr, names[i], count));
}
}
private static String nameOfRange(long[] bucketOffsets, int index) {
StringBuilder sb = new StringBuilder();
appendRange(sb, bucketOffsets, index);
return sb.toString();
}
private static void appendRange(StringBuilder sb, long[] bucketOffsets, int index) {
sb.append("[");
if (index == 0)
if (bucketOffsets[0] > 0)
// by original definition, this histogram is for values greater than zero only;
// if values of 0 or less are required, an entry of lb-1 must be inserted at the start
sb.append("1");
else
sb.append("-Inf");
else
sb.append(bucketOffsets[index - 1] + 1);
sb.append("..");
if (index == bucketOffsets.length)
sb.append("Inf");
else
sb.append(bucketOffsets[index]);
sb.append("]");
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (!(o instanceof EstimatedHistogram))
return false;
EstimatedHistogram that = (EstimatedHistogram) o;
return Arrays.equals(getBucketOffsets(), that.getBucketOffsets()) &&
Arrays.equals(getBuckets(false), that.getBuckets(false));
}
@Override
public int hashCode() {
return Objects.hash(getBucketOffsets(), getBuckets(false));
}
}
| 5,562 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/TransportStatsMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.transport.TransportStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class TransportStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(TransportStatsMonitor.class);

    // Scheduler task name; also returned by getName().
    public static final String METRIC_NAME = "Elasticsearch_TransportMonitor";

    // Servo-registered reporter holding the latest published stats snapshot.
    private final Elasticsearch_TransportStatsReporter transportStatsReporter;

    @Inject
    public TransportStatsMonitor(IConfiguration config) {
        super(config);
        transportStatsReporter = new Elasticsearch_TransportStatsReporter();
        // Registration exposes every @Monitor-annotated getter of the reporter to Servo.
        Monitors.registerObject(transportStatsReporter);
    }

    /**
     * Polls transport-layer stats (open connections, rx/tx counts and sizes)
     * of the local Elasticsearch node and atomically publishes a fresh
     * snapshot to the Servo reporter. The *_delta gauges report the change in
     * rx/tx size since the previous successful poll. On any failure a zeroed
     * snapshot is published and a warning is logged.
     */
    @Override
    public void execute() throws Exception {
        // If Elasticsearch is started then only start the monitoring
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        TransportStatsBean transportStatsBean = new TransportStatsBean();

        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);

            // Only the first node entry is inspected — presumably the response
            // covers the local node only; confirm against the transport client.
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();

            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }

            if (nodeStats == null) {
                logger.info("Transport stats are not available (node stats is not available)");
                return;
            }

            TransportStats transportStats = nodeStats.getTransport();

            if (transportStats == null) {
                logger.info("Transport stats are not available");
                return;
            }

            // Previously published snapshot, used for per-interval deltas.
            TransportStatsBean previousStats = transportStatsReporter.transportStatsBean.get();

            transportStatsBean.serverOpen = transportStats.getServerOpen();
            transportStatsBean.rxCount = transportStats.getRxCount();
            transportStatsBean.rxSize = transportStats.getRxSize().getBytes();
            // BUGFIX: the delta was computed against the bean's own
            // just-assigned rxSize/txSize, which made both *_delta metrics
            // always 0. Compute against the previous snapshot instead; on the
            // first poll the previous snapshot is all zeros, so the delta
            // equals the cumulative total.
            transportStatsBean.rxSizeDelta = transportStatsBean.rxSize - previousStats.rxSize;
            transportStatsBean.txCount = transportStats.getTxCount();
            transportStatsBean.txSize = transportStats.getTxSize().getBytes();
            transportStatsBean.txSizeDelta = transportStatsBean.txSize - previousStats.txSize;
        } catch (Exception e) {
            // Best-effort: log and fall through, publishing the (zeroed) bean.
            logger.warn("Failed to load transport stats data", e);
        }

        transportStatsReporter.transportStatsBean.set(transportStatsBean);
    }

    /**
     * Servo reporter for transport stats. Metric names come from the @Monitor
     * annotations (not the Java method names); each getter reads the most
     * recently published immutable snapshot, so no locking is needed.
     */
    public class Elasticsearch_TransportStatsReporter {
        private final AtomicReference<TransportStatsBean> transportStatsBean;

        public Elasticsearch_TransportStatsReporter() {
            transportStatsBean = new AtomicReference<TransportStatsBean>(new TransportStatsBean());
        }

        @Monitor(name = "server_open", type = DataSourceType.GAUGE)
        public long getServerOpen() {
            return transportStatsBean.get().serverOpen;
        }

        @Monitor(name = "rx_count", type = DataSourceType.GAUGE)
        public long getRxCount() {
            return transportStatsBean.get().rxCount;
        }

        @Monitor(name = "rx_size", type = DataSourceType.GAUGE)
        public long getRxSize() {
            return transportStatsBean.get().rxSize;
        }

        // Bytes received since the previous poll (cumulative total on the first poll).
        @Monitor(name = "rx_size_delta", type = DataSourceType.GAUGE)
        public long getRxSizeDelta() {
            return transportStatsBean.get().rxSizeDelta;
        }

        @Monitor(name = "tx_count", type = DataSourceType.GAUGE)
        public long getTxCount() {
            return transportStatsBean.get().txCount;
        }

        @Monitor(name = "tx_size", type = DataSourceType.GAUGE)
        public long getTxSize() {
            return transportStatsBean.get().txSize;
        }

        // Bytes transmitted since the previous poll (cumulative total on the first poll).
        @Monitor(name = "tx_size_delta", type = DataSourceType.GAUGE)
        public long getTxSizeDelta() {
            return transportStatsBean.get().txSizeDelta;
        }
    }

    // Snapshot of the transport stats for one poll; built fresh each cycle
    // and published atomically, never mutated afterwards. Fields default to 0
    // when the stats fetch fails.
    private static class TransportStatsBean {
        private long serverOpen;
        private long rxCount;
        private long rxSize;
        private long rxSizeDelta;
        private long txCount;
        private long txSize;
        private long txSizeDelta;
    }

    // Scheduler hook: this task runs once per minute.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
| 5,563 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/NodeIndicesStatsMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.indices.NodeIndicesStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
/**
* Note: percentiles over average latencies
* <p>
* Currently ES provides only cumulative query & index time along with cumulative query & index count.
* Hence percentile values are calculated based on the average between consecutive time
* (t1 & t2, t2 & t3, ... , tn-1 & tn) of metrics collection.
*/
@Singleton
public class NodeIndicesStatsMonitor extends Task {
private static final Logger logger = LoggerFactory.getLogger(NodeIndicesStatsMonitor.class);
public static final String METRIC_NAME = "Elasticsearch_NodeIndicesMonitor";
// Servo reporter holding the most recently published stats sample
private final Elasticsearch_NodeIndicesStatsReporter nodeIndicesStatsReporter;
// Histograms accumulating per-interval average latencies, from which the
// 95th/99th percentiles are read on each collection cycle
private final EstimatedHistogram latencySearchQuery95Histo = new EstimatedHistogram();
private final EstimatedHistogram latencySearchQuery99Histo = new EstimatedHistogram();
private final EstimatedHistogram latencySearchFetch95Histo = new EstimatedHistogram();
private final EstimatedHistogram latencySearchFetch99Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyGet95Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyGet99Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyGetExists95Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyGetExists99Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyGetMissing95Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyGetMissing99Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyIndexing95Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyIndexing99Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyIndexDelete95Histo = new EstimatedHistogram();
private final EstimatedHistogram latencyIndexDelete99Histo = new EstimatedHistogram();
// Percentile ranks used when reading the histograms above
private final double PERCENTILE_95 = 0.95;
private final double PERCENTILE_99 = 0.99;
// Elasticsearch reports cumulative counters; these baselines hold the totals
// already accounted for, and are subtracted to obtain per-interval deltas
private long cachedQueryCount;
private long cachedFetchCount;
private long cachedGetCount;
private long cachedGetExistsCount;
private long cachedGetMissingCount;
private long cachedIndexingIndexTotal;
private long cachedIndexingDeleteTotal;
// Same baseline scheme, but for cumulative times (milliseconds)
private long cachedSearchQueryTime;
private long cachedSearchFetchTime;
private long cachedGetTime;
private long cachedGetExistsTime;
private long cachedGetMissingTime;
private long cachedIndexingTime;
private long cachedIndexDeleteTime;
/**
 * Creates the monitor and registers its metrics reporter with Servo.
 *
 * @param config Raigad configuration, passed through to the base {@code Task}
 */
@Inject
public NodeIndicesStatsMonitor(IConfiguration config) {
    super(config);
    nodeIndicesStatsReporter = new Elasticsearch_NodeIndicesStatsReporter();
    // Expose the reporter's @Monitor-annotated getters to Servo
    Monitors.registerObject(nodeIndicesStatsReporter);
}
/**
 * Builds the scheduling timer for this task.
 *
 * @param name timer name
 * @return a timer that fires once per minute
 */
public static TaskTimer getTimer(String name) {
    final long intervalMillis = 60 * 1000;
    return new SimpleTimer(name, intervalMillis);
}
/**
 * Collects node-level indices statistics from the local Elasticsearch node
 * and publishes them through the Servo reporter. If collection fails, the
 * error is logged and an all-zero bean is published instead.
 */
@Override
public void execute() throws Exception {
    // Skip collection until the Elasticsearch process is up
    if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
        logger.info("Elasticsearch is not yet started, check back again later");
        return;
    }

    NodeIndicesStatsBean statsBean = new NodeIndicesStatsBean();

    try {
        NodesStatsResponse statsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
        List<NodeStats> nodes = statsResponse.getNodes();
        NodeStats localNodeStats = nodes.isEmpty() ? null : nodes.get(0);

        if (localNodeStats == null) {
            logger.info("Node indices stats is not available (node stats is not available)");
            return;
        }

        NodeIndicesStats indicesStats = localNodeStats.getIndices();
        if (indicesStats == null) {
            logger.info("Node indices stats is not available");
            return;
        }

        // Each helper fills in one section of the sample
        updateStoreDocs(statsBean, indicesStats);
        updateRefreshFlush(statsBean, indicesStats);
        updateMerge(statsBean, indicesStats);
        updateCache(statsBean, indicesStats);
        updateSearch(statsBean, indicesStats);
        updateGet(statsBean, indicesStats);
        updateIndexing(statsBean, indicesStats);
    } catch (Exception e) {
        logger.warn("Failed to load indices stats data", e);
    }

    // Publish the freshly collected sample (all-zero bean on failure)
    nodeIndicesStatsReporter.nodeIndicesStatsBean.set(statsBean);
}
/** Copies store size/throttle and document counters into the bean. */
private void updateStoreDocs(NodeIndicesStatsBean bean, NodeIndicesStats stats) {
    bean.storeSize = stats.getStore().getSizeInBytes();
    bean.storeThrottleTime = stats.getStore().getThrottleTime().millis();
    bean.docsCount = stats.getDocs().getCount();
    bean.docsDeleted = stats.getDocs().getDeleted();
}
/**
 * Copies refresh and flush counters into the bean and derives the average
 * time (milliseconds) per request for each.
 */
private void updateRefreshFlush(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) {
    nodeIndicesStatsBean.refreshTotal = nodeIndicesStats.getRefresh().getTotal();
    nodeIndicesStatsBean.refreshTotalTime = nodeIndicesStats.getRefresh().getTotalTimeInMillis();
    if (nodeIndicesStatsBean.refreshTotal != 0) {
        // Bug fix: cast to double before dividing - long/long division
        // truncated the fraction even though the target field is a double.
        nodeIndicesStatsBean.refreshAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.refreshTotalTime / nodeIndicesStatsBean.refreshTotal;
    }

    nodeIndicesStatsBean.flushTotal = nodeIndicesStats.getFlush().getTotal();
    nodeIndicesStatsBean.flushTotalTime = nodeIndicesStats.getFlush().getTotalTimeInMillis();
    if (nodeIndicesStatsBean.flushTotal != 0) {
        // Same cast-to-double fix as above
        nodeIndicesStatsBean.flushAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.flushTotalTime / nodeIndicesStatsBean.flushTotal;
    }
}
/** Copies merge statistics (in-flight and cumulative) into the bean. */
private void updateMerge(NodeIndicesStatsBean bean, NodeIndicesStats stats) {
    bean.mergesCurrent = stats.getMerge().getCurrent();
    bean.mergesCurrentDocs = stats.getMerge().getCurrentNumDocs();
    bean.mergesCurrentSize = stats.getMerge().getCurrentSizeInBytes();
    bean.mergesTotal = stats.getMerge().getTotal();
    bean.mergesTotalTime = stats.getMerge().getTotalTimeInMillis();
    bean.mergesTotalSize = stats.getMerge().getTotalSizeInBytes();
}
/**
 * Copies field-data cache statistics into the bean.
 * NOTE(review): the bean's filter-cache fields are not populated here, so
 * the corresponding gauges always report 0 - confirm this is intentional.
 */
private void updateCache(NodeIndicesStatsBean bean, NodeIndicesStats stats) {
    bean.cacheFieldEvictions = stats.getFieldData().getEvictions();
    bean.cacheFieldSize = stats.getFieldData().getMemorySizeInBytes();
}
/**
 * Copies search (query/fetch) statistics into the bean, derives per-interval
 * deltas (clamped at 0 so counter resets do not go negative), percentile
 * latencies and averages, then advances the cached baselines for the next run.
 */
private void updateSearch(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) {
    nodeIndicesStatsBean.searchQueryTotal = nodeIndicesStats.getSearch().getTotal().getQueryCount();
    nodeIndicesStatsBean.searchFetchTotal = nodeIndicesStats.getSearch().getTotal().getFetchCount();
    nodeIndicesStatsBean.searchQueryCurrent = nodeIndicesStats.getSearch().getTotal().getQueryCurrent();

    long tmpSearchQueryDelta = nodeIndicesStatsBean.searchQueryTotal - cachedQueryCount;
    nodeIndicesStatsBean.searchQueryDelta = tmpSearchQueryDelta < 0 ? 0 : tmpSearchQueryDelta;

    long tmpSearchFetchDelta = nodeIndicesStatsBean.searchFetchTotal - cachedFetchCount;
    nodeIndicesStatsBean.searchFetchDelta = tmpSearchFetchDelta < 0 ? 0 : tmpSearchFetchDelta;

    nodeIndicesStatsBean.searchQueryTime = nodeIndicesStats.getSearch().getTotal().getQueryTimeInMillis();
    nodeIndicesStatsBean.searchFetchTime = nodeIndicesStats.getSearch().getTotal().getFetchTimeInMillis();

    long searchQueryDeltaTimeInMillis = (nodeIndicesStatsBean.searchQueryTime - cachedSearchQueryTime);
    if (nodeIndicesStatsBean.searchQueryDelta != 0) {
        // Record the interval's average latency per query into the histograms
        recordSearchQueryLatencies(searchQueryDeltaTimeInMillis / nodeIndicesStatsBean.searchQueryDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencySearchQuery95 = latencySearchQuery95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencySearchQuery99 = latencySearchQuery99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencySearchQuery95 = 0;
        nodeIndicesStatsBean.latencySearchQuery99 = 0;
    }

    if (nodeIndicesStatsBean.searchQueryTotal != 0) {
        // Bug fix: cast to double before dividing; long/long division truncated
        // the average even though the target field is a double.
        nodeIndicesStatsBean.searchQueryAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.searchQueryTime / nodeIndicesStatsBean.searchQueryTotal;
    }

    long searchFetchDeltaTimeInMillis = (nodeIndicesStatsBean.searchFetchTime - cachedSearchFetchTime);
    if (nodeIndicesStatsBean.searchFetchDelta != 0) {
        recordSearchFetchLatencies(searchFetchDeltaTimeInMillis / nodeIndicesStatsBean.searchFetchDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencySearchFetch95 = latencySearchFetch95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencySearchFetch99 = latencySearchFetch99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencySearchFetch95 = 0;
        nodeIndicesStatsBean.latencySearchFetch99 = 0;
    }

    if (nodeIndicesStatsBean.searchFetchTotal != 0) {
        // Same cast-to-double fix as above
        nodeIndicesStatsBean.searchFetchAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.searchFetchTime / nodeIndicesStatsBean.searchFetchTotal;
    }

    nodeIndicesStatsBean.searchFetchCurrent = nodeIndicesStats.getSearch().getTotal().getFetchCurrent();

    // Advance baselines so the next cycle measures only the new interval
    cachedQueryCount += nodeIndicesStatsBean.searchQueryDelta;
    cachedFetchCount += nodeIndicesStatsBean.searchFetchDelta;
    cachedSearchQueryTime += searchQueryDeltaTimeInMillis;
    cachedSearchFetchTime += searchFetchDeltaTimeInMillis;
}
/**
 * Copies get (total/exists/missing) statistics into the bean, derives
 * per-interval deltas (clamped at 0), percentile latencies and averages,
 * then advances the cached baselines for the next run.
 */
private void updateGet(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) {
    nodeIndicesStatsBean.getTotal = nodeIndicesStats.getGet().getCount();
    nodeIndicesStatsBean.getExistsTotal = nodeIndicesStats.getGet().getExistsCount();
    nodeIndicesStatsBean.getMissingTotal = nodeIndicesStats.getGet().getMissingCount();
    nodeIndicesStatsBean.getTime = nodeIndicesStats.getGet().getTimeInMillis();
    nodeIndicesStatsBean.getExistsTime = nodeIndicesStats.getGet().getExistsTimeInMillis();
    nodeIndicesStatsBean.getMissingTime = nodeIndicesStats.getGet().getMissingTimeInMillis();

    long tmpGetTotalDelta = nodeIndicesStatsBean.getTotal - cachedGetCount;
    nodeIndicesStatsBean.getTotalDelta = tmpGetTotalDelta < 0 ? 0 : tmpGetTotalDelta;

    long tmpGetExistsDelta = nodeIndicesStatsBean.getExistsTotal - cachedGetExistsCount;
    nodeIndicesStatsBean.getExistsDelta = tmpGetExistsDelta < 0 ? 0 : tmpGetExistsDelta;

    long tmpGetMissingDelta = nodeIndicesStatsBean.getMissingTotal - cachedGetMissingCount;
    nodeIndicesStatsBean.getMissingDelta = tmpGetMissingDelta < 0 ? 0 : tmpGetMissingDelta;

    long getDeltaTimeInMillis = (nodeIndicesStatsBean.getTime - cachedGetTime);
    if (nodeIndicesStatsBean.getTotalDelta != 0) {
        recordGetLatencies(getDeltaTimeInMillis / nodeIndicesStatsBean.getTotalDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyGet95 = latencyGet95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyGet99 = latencyGet99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyGet95 = 0;
        nodeIndicesStatsBean.latencyGet99 = 0;
    }

    if (nodeIndicesStatsBean.getTotal != 0) {
        // Bug fix: cast to double before dividing; long/long division truncated
        // the average even though the target field is a double.
        nodeIndicesStatsBean.getTotalAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.getTime / nodeIndicesStatsBean.getTotal;
    }

    nodeIndicesStatsBean.getCurrent = nodeIndicesStats.getGet().current();

    long getExistsDeltaTimeInMillies = (nodeIndicesStatsBean.getExistsTime - cachedGetExistsTime);
    if (nodeIndicesStatsBean.getExistsDelta != 0) {
        recordGetExistsLatencies(getExistsDeltaTimeInMillies / nodeIndicesStatsBean.getExistsDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyGetExists95 = latencyGetExists95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyGetExists99 = latencyGetExists99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyGetExists95 = 0;
        nodeIndicesStatsBean.latencyGetExists99 = 0;
    }

    if (nodeIndicesStatsBean.getExistsTotal != 0) {
        // Same cast-to-double fix as above
        nodeIndicesStatsBean.getExistsAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.getExistsTime / nodeIndicesStatsBean.getExistsTotal;
    }

    long getMissingDeltaTimeInMillies = (nodeIndicesStatsBean.getMissingTime - cachedGetMissingTime);
    if (nodeIndicesStatsBean.getMissingDelta != 0) {
        recordGetMissingLatencies(getMissingDeltaTimeInMillies / nodeIndicesStatsBean.getMissingDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyGetMissing95 = latencyGetMissing95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyGetMissing99 = latencyGetMissing99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyGetMissing95 = 0;
        nodeIndicesStatsBean.latencyGetMissing99 = 0;
    }

    if (nodeIndicesStatsBean.getMissingTotal != 0) {
        // Same cast-to-double fix as above
        nodeIndicesStatsBean.getMissingAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.getMissingTime / nodeIndicesStatsBean.getMissingTotal;
    }

    // Advance baselines so the next cycle measures only the new interval
    cachedGetCount += nodeIndicesStatsBean.getTotalDelta;
    cachedGetExistsCount += nodeIndicesStatsBean.getExistsDelta;
    cachedGetMissingCount += nodeIndicesStatsBean.getMissingDelta;
    cachedGetTime += getDeltaTimeInMillis;
    cachedGetExistsTime += getExistsDeltaTimeInMillies;
    cachedGetMissingTime += getMissingDeltaTimeInMillies;
}
/**
 * Copies indexing (index/delete) statistics into the bean, derives
 * per-interval deltas (clamped at 0), percentile latencies and averages,
 * then advances the cached baselines for the next run.
 */
private void updateIndexing(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) {
    nodeIndicesStatsBean.indexingIndexTotal = nodeIndicesStats.getIndexing().getTotal().getIndexCount();
    nodeIndicesStatsBean.indexingDeleteTotal = nodeIndicesStats.getIndexing().getTotal().getDeleteCount();
    nodeIndicesStatsBean.indexingIndexCurrent = nodeIndicesStats.getIndexing().getTotal().getIndexCurrent();

    long tmpIndexingIndexDelta = (nodeIndicesStatsBean.indexingIndexTotal - cachedIndexingIndexTotal);
    nodeIndicesStatsBean.indexingIndexDelta = tmpIndexingIndexDelta < 0 ? 0 : tmpIndexingIndexDelta;

    long tmpIndexingDeleteDelta = (nodeIndicesStatsBean.indexingDeleteTotal - cachedIndexingDeleteTotal);
    nodeIndicesStatsBean.indexingDeleteDelta = tmpIndexingDeleteDelta < 0 ? 0 : tmpIndexingDeleteDelta;

    nodeIndicesStatsBean.indexingIndexTimeInMillis = nodeIndicesStats.getIndexing().getTotal().getIndexTime().getMillis();
    nodeIndicesStatsBean.indexingDeleteTime = nodeIndicesStats.getIndexing().getTotal().getDeleteTime().getMillis();

    long indexingTimeInMillis = (nodeIndicesStatsBean.indexingIndexTimeInMillis - cachedIndexingTime);
    if (nodeIndicesStatsBean.indexingIndexDelta != 0) {
        recordIndexingLatencies(indexingTimeInMillis / nodeIndicesStatsBean.indexingIndexDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyIndexing95 = latencyIndexing95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyIndexing99 = latencyIndexing99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyIndexing95 = 0;
        nodeIndicesStatsBean.latencyIndexing99 = 0;
    }

    if (nodeIndicesStatsBean.indexingIndexTotal != 0) {
        // Bug fix: cast to double before dividing; long/long division truncated
        // the average even though the target field is a double.
        nodeIndicesStatsBean.indexingAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.indexingIndexTimeInMillis / nodeIndicesStatsBean.indexingIndexTotal;
    }

    long indexDeleteTimeInMillis = (nodeIndicesStatsBean.indexingDeleteTime - cachedIndexDeleteTime);
    if (nodeIndicesStatsBean.indexingDeleteDelta != 0) {
        recordIndexDeleteLatencies(indexDeleteTimeInMillis / nodeIndicesStatsBean.indexingDeleteDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyIndexDelete95 = latencyIndexDelete95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyIndexDelete99 = latencyIndexDelete99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyIndexDelete95 = 0;
        nodeIndicesStatsBean.latencyIndexDelete99 = 0;
    }

    if (nodeIndicesStatsBean.indexingDeleteTotal != 0) {
        // Same cast-to-double fix as above
        nodeIndicesStatsBean.indexingDeleteAvgTimeInMillisPerRequest =
                (double) nodeIndicesStatsBean.indexingDeleteTime / nodeIndicesStatsBean.indexingDeleteTotal;
    }

    nodeIndicesStatsBean.indexingDeleteCurrent = nodeIndicesStats.getIndexing().getTotal().getDeleteCurrent();

    // Advance baselines so the next cycle measures only the new interval
    cachedIndexingIndexTotal += nodeIndicesStatsBean.indexingIndexDelta;
    cachedIndexingDeleteTotal += nodeIndicesStatsBean.indexingDeleteDelta;
    cachedIndexingTime += indexingTimeInMillis;
    cachedIndexDeleteTime += indexDeleteTimeInMillis;
}
/** Folds one averaged search-query latency sample (converted to microseconds) into the percentile histograms. */
private void recordSearchQueryLatencies(long duration, TimeUnit unit) {
    final long latencyMicros = unit.toMicros(duration);
    latencySearchQuery95Histo.add(latencyMicros);
    latencySearchQuery99Histo.add(latencyMicros);
}
/** Folds one averaged search-fetch latency sample (converted to microseconds) into the percentile histograms. */
private void recordSearchFetchLatencies(long duration, TimeUnit unit) {
    final long latencyMicros = unit.toMicros(duration);
    latencySearchFetch95Histo.add(latencyMicros);
    latencySearchFetch99Histo.add(latencyMicros);
}
/** Folds one averaged get latency sample (converted to microseconds) into the percentile histograms. */
private void recordGetLatencies(long duration, TimeUnit unit) {
    final long latencyMicros = unit.toMicros(duration);
    latencyGet95Histo.add(latencyMicros);
    latencyGet99Histo.add(latencyMicros);
}
/** Folds one averaged get-exists latency sample (converted to microseconds) into the percentile histograms. */
private void recordGetExistsLatencies(long duration, TimeUnit unit) {
    final long latencyMicros = unit.toMicros(duration);
    latencyGetExists95Histo.add(latencyMicros);
    latencyGetExists99Histo.add(latencyMicros);
}
/** Folds one averaged get-missing latency sample (converted to microseconds) into the percentile histograms. */
private void recordGetMissingLatencies(long duration, TimeUnit unit) {
    final long latencyMicros = unit.toMicros(duration);
    latencyGetMissing95Histo.add(latencyMicros);
    latencyGetMissing99Histo.add(latencyMicros);
}
/** Folds one averaged indexing latency sample (converted to microseconds) into the percentile histograms. */
private void recordIndexingLatencies(long duration, TimeUnit unit) {
    final long latencyMicros = unit.toMicros(duration);
    latencyIndexing95Histo.add(latencyMicros);
    latencyIndexing99Histo.add(latencyMicros);
}
/** Folds one averaged index-delete latency sample (converted to microseconds) into the percentile histograms. */
private void recordIndexDeleteLatencies(long duration, TimeUnit unit) {
    final long latencyMicros = unit.toMicros(duration);
    latencyIndexDelete95Histo.add(latencyMicros);
    latencyIndexDelete99Histo.add(latencyMicros);
}
/**
 * Returns the metric name under which this task is scheduled and reported.
 */
@Override
public String getName() {
    return METRIC_NAME;
}
/**
 * Plain value holder for one node-indices stats sample. A fresh instance is
 * built on each collection cycle and published atomically to the reporter.
 */
private static class NodeIndicesStatsBean {
    // Store and document counts
    private long storeSize;
    private long storeThrottleTime;
    private long docsCount;
    private long docsDeleted;
    // Indexing (index and delete operations)
    private long indexingIndexTotal;
    private long indexingIndexTimeInMillis;
    private double indexingAvgTimeInMillisPerRequest;
    private long indexingIndexCurrent;
    private long indexingDeleteTotal;
    private long indexingDeleteTime;
    private double indexingDeleteAvgTimeInMillisPerRequest;
    private long indexingDeleteCurrent;
    private long indexingIndexDelta;
    private long indexingDeleteDelta;
    // Get (total / exists / missing)
    private long getTotal;
    private long getTime;
    private double getTotalAvgTimeInMillisPerRequest;
    private long getCurrent;
    private long getExistsTotal;
    private long getExistsTime;
    private double getExistsAvgTimeInMillisPerRequest;
    private long getMissingTotal;
    private long getMissingTime;
    private double getMissingAvgTimeInMillisPerRequest;
    private long getTotalDelta;
    private long getExistsDelta;
    private long getMissingDelta;
    // Search (query and fetch phases)
    private long searchQueryTotal;
    private long searchQueryTime;
    private double searchQueryAvgTimeInMillisPerRequest;
    private long searchQueryCurrent;
    private long searchQueryDelta;
    private long searchFetchTotal;
    private long searchFetchTime;
    private double searchFetchAvgTimeInMillisPerRequest;
    private long searchFetchCurrent;
    private long searchFetchDelta;
    // Caches
    private long cacheFieldEvictions;
    private long cacheFieldSize;
    // NOTE(review): the two filter-cache fields below are never assigned in
    // this monitor, so their gauges always report 0 - confirm intentional.
    private long cacheFilterEvictions;
    private long cacheFilterSize;
    // Merges
    private long mergesCurrent;
    private long mergesCurrentDocs;
    private long mergesCurrentSize;
    private long mergesTotal;
    private long mergesTotalTime;
    private long mergesTotalSize;
    // Refresh / flush
    private long refreshTotal;
    private long refreshTotalTime;
    private double refreshAvgTimeInMillisPerRequest;
    private long flushTotal;
    private long flushTotalTime;
    private double flushAvgTimeInMillisPerRequest;
    // 95th/99th percentile latencies read from the EstimatedHistograms
    private double latencySearchQuery95;
    private double latencySearchQuery99;
    private double latencySearchFetch95;
    private double latencySearchFetch99;
    private double latencyGet95;
    private double latencyGet99;
    private double latencyGetExists95;
    private double latencyGetExists99;
    private double latencyGetMissing95;
    private double latencyGetMissing99;
    private double latencyIndexing95;
    private double latencyIndexing99;
    private double latencyIndexDelete95;
    private double latencyIndexDelete99;
}
public class Elasticsearch_NodeIndicesStatsReporter {
private final AtomicReference<NodeIndicesStatsBean> nodeIndicesStatsBean;
public Elasticsearch_NodeIndicesStatsReporter() {
nodeIndicesStatsBean = new AtomicReference<NodeIndicesStatsBean>(new NodeIndicesStatsBean());
}
@Monitor(name = "store_size", type = DataSourceType.GAUGE)
public long getStoreSize() {
return nodeIndicesStatsBean.get().storeSize;
}
@Monitor(name = "store_throttle_time", type = DataSourceType.GAUGE)
public long getStoreThrottleTime() {
return nodeIndicesStatsBean.get().storeThrottleTime;
}
@Monitor(name = "docs_count", type = DataSourceType.GAUGE)
public long getDocsCount() {
return nodeIndicesStatsBean.get().docsCount;
}
@Monitor(name = "docs_deleted", type = DataSourceType.GAUGE)
public long getDocsDeleted() {
return nodeIndicesStatsBean.get().docsDeleted;
}
//Indexing
@Monitor(name = "indexing_index_total", type = DataSourceType.COUNTER)
public long getIndexingIndexTotal() {
return nodeIndicesStatsBean.get().indexingIndexTotal;
}
@Monitor(name = "indexing_index_time_in_millis", type = DataSourceType.COUNTER)
public long getIndexingIndexTimeInMillis() {
return nodeIndicesStatsBean.get().indexingIndexTimeInMillis;
}
@Monitor(name = "indexing_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
public double getIndexingAvgTimeInMillisPerRequest() {
return nodeIndicesStatsBean.get().indexingAvgTimeInMillisPerRequest;
}
@Monitor(name = "indexing_index_current", type = DataSourceType.GAUGE)
public long getIndexingIndexCurrent() {
return nodeIndicesStatsBean.get().indexingIndexCurrent;
}
@Monitor(name = "indexing_delete_total", type = DataSourceType.COUNTER)
public long getIndexingDeleteTotal() {
return nodeIndicesStatsBean.get().indexingDeleteTotal;
}
@Monitor(name = "indexing_delete_time", type = DataSourceType.COUNTER)
public long getIndexingDeleteTime() {
return nodeIndicesStatsBean.get().indexingDeleteTime;
}
@Monitor(name = "indexing_delete_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
public double getIndexingDeleteAvgTimeInMillisPerRequest() {
return nodeIndicesStatsBean.get().indexingDeleteAvgTimeInMillisPerRequest;
}
@Monitor(name = "indexing_delete_current", type = DataSourceType.GAUGE)
public long getIndexingDeleteCurrent() {
return nodeIndicesStatsBean.get().indexingDeleteCurrent;
}
@Monitor(name = "indexing_index_delta", type = DataSourceType.GAUGE)
public long getIndexingIndexDelta() {
return nodeIndicesStatsBean.get().indexingIndexDelta;
}
@Monitor(name = "indexing_delete_delta", type = DataSourceType.GAUGE)
public long getIndexingDeleteDelta() {
return nodeIndicesStatsBean.get().indexingDeleteDelta;
}
//Get
@Monitor(name = "get_total", type = DataSourceType.COUNTER)
public long getGetTotal() {
return nodeIndicesStatsBean.get().getTotal;
}
@Monitor(name = "get_time", type = DataSourceType.COUNTER)
public long getGetTime() {
return nodeIndicesStatsBean.get().getTime;
}
@Monitor(name = "total_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
public double getTotalAvgTimeInMillisPerRequest() {
return nodeIndicesStatsBean.get().getTotalAvgTimeInMillisPerRequest;
}
@Monitor(name = "get_current", type = DataSourceType.GAUGE)
public long getGetCurrent() {
return nodeIndicesStatsBean.get().getCurrent;
}
@Monitor(name = "get_exists_total", type = DataSourceType.COUNTER)
public long getGetExistsTotal() {
return nodeIndicesStatsBean.get().getExistsTotal;
}
@Monitor(name = "get_exists_time", type = DataSourceType.COUNTER)
public long getGetExistsTime() {
return nodeIndicesStatsBean.get().getExistsTime;
}
@Monitor(name = "exists_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
public double getExistsAvgTimeInMillisPerRequest() {
return nodeIndicesStatsBean.get().getExistsAvgTimeInMillisPerRequest;
}
@Monitor(name = "get_missing_total", type = DataSourceType.COUNTER)
public long getGetMissingTotal() {
return nodeIndicesStatsBean.get().getMissingTotal;
}
@Monitor(name = "get_missing_time", type = DataSourceType.COUNTER)
public long getGetMissingTime() {
return nodeIndicesStatsBean.get().getMissingTime;
}
@Monitor(name = "missing_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
public double getMissingAvgTimeInMillisPerRequest() {
return nodeIndicesStatsBean.get().getMissingAvgTimeInMillisPerRequest;
}
//Search
@Monitor(name = "get_total_delta", type = DataSourceType.GAUGE)
public long getGetTotalDelta() {
return nodeIndicesStatsBean.get().getTotalDelta;
}
@Monitor(name = "get_exists_delta", type = DataSourceType.GAUGE)
public long getGetExistsDelta() {
return nodeIndicesStatsBean.get().getExistsDelta;
}
@Monitor(name = "get_missing_delta", type = DataSourceType.GAUGE)
public long getGetMissingDelta() {
return nodeIndicesStatsBean.get().getMissingDelta;
}
@Monitor(name = "search_query_total", type = DataSourceType.COUNTER)
public long getSearchQueryTotal() {
return nodeIndicesStatsBean.get().searchQueryTotal;
}
@Monitor(name = "search_query_time", type = DataSourceType.COUNTER)
public long getSearchQueryTime() {
return nodeIndicesStatsBean.get().searchQueryTime;
}
@Monitor(name = "search_query_current", type = DataSourceType.GAUGE)
public long getSearchQueryCurrent() {
return nodeIndicesStatsBean.get().searchQueryCurrent;
}
@Monitor(name = "search_query_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
public double getSearchQueryAvgTimeInMillisPerRequest() {
return nodeIndicesStatsBean.get().searchQueryAvgTimeInMillisPerRequest;
}
@Monitor(name = "search_query_delta", type = DataSourceType.GAUGE)
public long getSearchQueryDelta() {
return nodeIndicesStatsBean.get().searchQueryDelta;
}
@Monitor(name = "search_fetch_total", type = DataSourceType.COUNTER)
public long getSearchFetchTotal() {
return nodeIndicesStatsBean.get().searchFetchTotal;
}
@Monitor(name = "search_fetch_time", type = DataSourceType.COUNTER)
public long getSearchFetchTime() {
return nodeIndicesStatsBean.get().searchFetchTime;
}
@Monitor(name = "search_fetch_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
public double getSearchFetchAvgTimeInMillisPerRequest() {
return nodeIndicesStatsBean.get().searchFetchAvgTimeInMillisPerRequest;
}
@Monitor(name = "search_fetch_current", type = DataSourceType.GAUGE)
public long getSearchFetchCurrent() {
return nodeIndicesStatsBean.get().searchFetchCurrent;
}
@Monitor(name = "search_fetch_delta", type = DataSourceType.GAUGE)
public long getSearchFetchDelta() {
return nodeIndicesStatsBean.get().searchFetchDelta;
}
//Cache
@Monitor(name = "cache_field_evictions", type = DataSourceType.GAUGE)
public long getCacheFieldEvictions() {
return nodeIndicesStatsBean.get().cacheFieldEvictions;
}
@Monitor(name = "cache_field_size", type = DataSourceType.GAUGE)
public long getCacheFieldSize() {
return nodeIndicesStatsBean.get().cacheFieldSize;
}
@Monitor(name = "cache_filter_evictions", type = DataSourceType.GAUGE)
public long getCacheFilterEvictions() {
return nodeIndicesStatsBean.get().cacheFilterEvictions;
}
@Monitor(name = "cache_filter_size", type = DataSourceType.GAUGE)
public long getCacheFilterSize() {
return nodeIndicesStatsBean.get().cacheFilterSize;
}
//Merge
        // ----- Merge statistics (Lucene background segment merging) -----
        // All values are read from the latest nodeIndicesStatsBean snapshot.

        // Merges executing right now on this node.
        @Monitor(name = "merges_current", type = DataSourceType.GAUGE)
        public long getMergesCurrent() {
            return nodeIndicesStatsBean.get().mergesCurrent;
        }

        // Documents participating in the currently running merges.
        @Monitor(name = "merges_current_docs", type = DataSourceType.GAUGE)
        public long getMergesCurrentDocs() {
            return nodeIndicesStatsBean.get().mergesCurrentDocs;
        }

        // Size (bytes, presumably — TODO confirm against stats collector) of the currently running merges.
        @Monitor(name = "merges_current_size", type = DataSourceType.GAUGE)
        public long getMergesCurrentSize() {
            return nodeIndicesStatsBean.get().mergesCurrentSize;
        }

        // Cumulative number of merges completed (monotonic, hence COUNTER).
        @Monitor(name = "merges_total", type = DataSourceType.COUNTER)
        public long getMergesTotal() {
            return nodeIndicesStatsBean.get().mergesTotal;
        }

        // Cumulative time spent merging (monotonic, hence COUNTER).
        @Monitor(name = "merges_total_time", type = DataSourceType.COUNTER)
        public long getMergesTotalTime() {
            return nodeIndicesStatsBean.get().mergesTotalTime;
        }

        // Total size merged so far.
        @Monitor(name = "merges_total_size", type = DataSourceType.GAUGE)
        public long getMergesTotalSize() {
            return nodeIndicesStatsBean.get().mergesTotalSize;
        }

        // ----- Refresh statistics -----

        // Cumulative number of index refresh operations.
        @Monitor(name = "refresh_total", type = DataSourceType.COUNTER)
        public long getRefreshTotal() {
            return nodeIndicesStatsBean.get().refreshTotal;
        }

        // Cumulative time spent in refresh operations.
        @Monitor(name = "refresh_total_time", type = DataSourceType.COUNTER)
        public long getRefreshTotalTime() {
            return nodeIndicesStatsBean.get().refreshTotalTime;
        }

        // Derived average refresh latency per request, in milliseconds.
        @Monitor(name = "refresh_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
        public double getRefreshAvgTimeInMillisPerRequest() {
            return nodeIndicesStatsBean.get().refreshAvgTimeInMillisPerRequest;
        }

        // ----- Flush statistics -----

        // Cumulative number of index flush operations.
        @Monitor(name = "flush_total", type = DataSourceType.COUNTER)
        public long getFlushTotal() {
            return nodeIndicesStatsBean.get().flushTotal;
        }

        // Cumulative time spent in flush operations.
        @Monitor(name = "flush_total_time", type = DataSourceType.COUNTER)
        public long getFlushTotalTime() {
            return nodeIndicesStatsBean.get().flushTotalTime;
        }

        // Derived average flush latency per request, in milliseconds.
        @Monitor(name = "flush_avg_time_in_millis_per_request", type = DataSourceType.GAUGE)
        public double getFlushAvgTimeInMillisPerRequest() {
            return nodeIndicesStatsBean.get().flushAvgTimeInMillisPerRequest;
        }

        // ----- Percentile latencies (95th / 99th) per operation type -----
        // Units are presumably milliseconds, matching the avg-time metrics above — TODO confirm.

        @Monitor(name = "latencySearchQuery95", type = DataSourceType.GAUGE)
        public double getLatencySearchQuery95() {
            return nodeIndicesStatsBean.get().latencySearchQuery95;
        }

        @Monitor(name = "latencySearchQuery99", type = DataSourceType.GAUGE)
        public double getLatencySearchQuery99() {
            return nodeIndicesStatsBean.get().latencySearchQuery99;
        }

        @Monitor(name = "latencySearchFetch95", type = DataSourceType.GAUGE)
        public double getLatencySearchFetch95() {
            return nodeIndicesStatsBean.get().latencySearchFetch95;
        }

        @Monitor(name = "latencySearchFetch99", type = DataSourceType.GAUGE)
        public double getLatencySearchFetch99() {
            return nodeIndicesStatsBean.get().latencySearchFetch99;
        }

        @Monitor(name = "latencyGet95", type = DataSourceType.GAUGE)
        public double getLatencyGet95() {
            return nodeIndicesStatsBean.get().latencyGet95;
        }

        @Monitor(name = "latencyGet99", type = DataSourceType.GAUGE)
        public double getLatencyGet99() {
            return nodeIndicesStatsBean.get().latencyGet99;
        }

        @Monitor(name = "latencyGetExists95", type = DataSourceType.GAUGE)
        public double getLatencyGetExists95() {
            return nodeIndicesStatsBean.get().latencyGetExists95;
        }

        @Monitor(name = "latencyGetExists99", type = DataSourceType.GAUGE)
        public double getLatencyGetExists99() {
            return nodeIndicesStatsBean.get().latencyGetExists99;
        }

        @Monitor(name = "latencyGetMissing95", type = DataSourceType.GAUGE)
        public double getLatencyGetMissing95() {
            return nodeIndicesStatsBean.get().latencyGetMissing95;
        }

        @Monitor(name = "latencyGetMissing99", type = DataSourceType.GAUGE)
        public double getLatencyGetMissing99() {
            return nodeIndicesStatsBean.get().latencyGetMissing99;
        }

        @Monitor(name = "latencyIndexing95", type = DataSourceType.GAUGE)
        public double getLatencyIndexing95() {
            return nodeIndicesStatsBean.get().latencyIndexing95;
        }

        @Monitor(name = "latencyIndexing99", type = DataSourceType.GAUGE)
        public double getLatencyIndexing99() {
            return nodeIndicesStatsBean.get().latencyIndexing99;
        }

        @Monitor(name = "latencyIndexDelete95", type = DataSourceType.GAUGE)
        public double getLatencyIndexDelete95() {
            return nodeIndicesStatsBean.get().latencyIndexDelete95;
        }

        @Monitor(name = "latencyIndexDelete99", type = DataSourceType.GAUGE)
        public double getLatencyIndexDelete99() {
            return nodeIndicesStatsBean.get().latencyIndexDelete99;
        }
}
}
| 5,564 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/NodeHealthMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class NodeHealthMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(NodeHealthMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_NodeHealthMonitor";
    private final ElasticsearchNodeHealthReporter healthReporter;

    @Inject
    public NodeHealthMonitor(IConfiguration config) {
        super(config);
        healthReporter = new ElasticsearchNodeHealthReporter();
        Monitors.registerObject(healthReporter);
    }

    /**
     * Samples Elasticsearch process liveness and publishes it via the registered
     * Servo reporter. Gauge semantics (metric {@code es_isesprocessdown}):
     * 0 = process running, 1 = process down, -1 = status unknown
     * (Elasticsearch never started, or the liveness check threw).
     */
    @Override
    public void execute() throws Exception {
        // Only start monitoring once Elasticsearch has been started at least once
        if (!ElasticsearchProcessMonitor.getWasElasticsearchStarted()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        HealthBean healthBean = new HealthBean();
        try {
            if (ElasticsearchProcessMonitor.isElasticsearchRunning()) {
                healthBean.esprocessrunning = 0;
            } else {
                // BUG FIX: the original logged "Elasticsearch process is up & running"
                // in this (not-running) branch; the gauge value (1 = down) was correct,
                // only the message was inverted.
                logger.info("Elasticsearch process is not running");
                healthBean.esprocessrunning = 1;
            }
        } catch (Exception e) {
            resetHealthStats(healthBean);
            logger.warn("failed to check if Elasticsearch process is running", e);
        }
        healthReporter.healthBean.set(healthBean);
    }

    public class ElasticsearchNodeHealthReporter {
        private final AtomicReference<HealthBean> healthBean;

        public ElasticsearchNodeHealthReporter() {
            healthBean = new AtomicReference<HealthBean>(new HealthBean());
        }

        // NOTE: despite the field name, this reports the "is down" indicator
        // (0 = up, 1 = down, -1 = unknown) to match the metric name.
        @Monitor(name = "es_isesprocessdown", type = DataSourceType.GAUGE)
        public int getIsEsProcessDown() {
            return healthBean.get().esprocessrunning;
        }
    }

    // Holds one sample; -1 means "unknown / not yet sampled".
    private static class HealthBean {
        private int esprocessrunning = -1;
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }

    // Runs once a minute.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    // Marks the sample as "unknown" after a failed liveness check.
    private void resetHealthStats(HealthBean healthBean) {
        healthBean.esprocessrunning = -1;
    }
}
| 5,565 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/SnapshotBackupMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.backup.SnapshotBackupManager;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class SnapshotBackupMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(SnapshotBackupMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_SnapshotBackupMonitor";

    private final Elasticsearch_SnapshotBackupReporter reporter;
    private final SnapshotBackupManager backupManager;

    @Inject
    public SnapshotBackupMonitor(IConfiguration config, SnapshotBackupManager snapshotBackupManager) {
        super(config);
        this.backupManager = snapshotBackupManager;
        this.reporter = new Elasticsearch_SnapshotBackupReporter();
        Monitors.registerObject(reporter);
    }

    /**
     * Copies the snapshot success/failure counters from the backup manager into
     * a fresh bean and publishes it through the Servo reporter. Skips the cycle
     * entirely while Elasticsearch is not running.
     */
    @Override
    public void execute() throws Exception {
        // Nothing to report until the Elasticsearch process is up
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        SnapshotBackupBean sample = new SnapshotBackupBean();
        try {
            sample.snapshotSuccess = backupManager.getNumSnapshotSuccess();
            sample.snapshotFailure = backupManager.getNumSnapshotFailure();
        } catch (Exception e) {
            // Best-effort: publish the (zeroed) sample even if the manager lookup failed
            logger.warn("failed to load Cluster SnapshotBackup Status", e);
        }
        reporter.currentStats.set(sample);
    }

    /** Servo-facing view over the most recently collected snapshot counters. */
    public class Elasticsearch_SnapshotBackupReporter {
        private final AtomicReference<SnapshotBackupBean> currentStats;

        public Elasticsearch_SnapshotBackupReporter() {
            currentStats = new AtomicReference<>(new SnapshotBackupBean());
        }

        @Monitor(name = "snapshot_success", type = DataSourceType.GAUGE)
        public int getSnapshotSuccess() {
            return currentStats.get().snapshotSuccess;
        }

        @Monitor(name = "snapshot_failure", type = DataSourceType.GAUGE)
        public int getSnapshotFailure() {
            return currentStats.get().snapshotFailure;
        }
    }

    /** One sampling of the snapshot counters (defaults to zero). */
    private static class SnapshotBackupBean {
        private int snapshotSuccess;
        private int snapshotFailure;
    }

    // Runs once an hour.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 3600 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
| 5,566 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/AllCircuitBreakerStatsMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
import org.elasticsearch.indices.breaker.CircuitBreakerStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class AllCircuitBreakerStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(AllCircuitBreakerStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_AllCircuitBreakerStatsMonitor";
    private final Elasticsearch_AllCircuitBreakerStatsReporter allCircuitBreakerStatsReporter;
    @Inject
    public AllCircuitBreakerStatsMonitor(IConfiguration config) {
        super(config);
        allCircuitBreakerStatsReporter = new Elasticsearch_AllCircuitBreakerStatsReporter();
        Monitors.registerObject(allCircuitBreakerStatsReporter);
    }
    /**
     * Pulls circuit-breaker stats (field-data and request breakers) from the first
     * node returned by a local nodes-stats call and publishes them via the Servo
     * reporter. Bails out early — leaving the previously published bean in place —
     * when Elasticsearch is down or the stats chain yields nothing.
     */
    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }
        AllCircuitBreakerStatsBean allCircuitBreakerStatsBean = new AllCircuitBreakerStatsBean();
        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
            // Only the first node's stats are used — presumably the local node; TODO confirm
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }
            if (nodeStats == null) {
                logger.info("Circuit breaker stats is not available (node stats is not available)");
                return;
            }
            AllCircuitBreakerStats allCircuitBreakerStats = nodeStats.getBreaker();
            if (allCircuitBreakerStats == null) {
                logger.info("Circuit breaker stats is not available");
                return;
            }
            CircuitBreakerStats[] circuitBreakerStats = allCircuitBreakerStats.getAllStats();
            if (circuitBreakerStats == null || circuitBreakerStats.length == 0) {
                logger.info("Circuit breaker stats is not available (stats are empty)");
                return;
            }
            // Pick out the FIELDDATA and REQUEST breakers by name; other breakers
            // (e.g. parent, in-flight requests) are ignored.
            for (CircuitBreakerStats circuitBreakerStat : circuitBreakerStats) {
                if (CircuitBreaker.FIELDDATA.equals(circuitBreakerStat.getName())) {
                    allCircuitBreakerStatsBean.fieldDataEstimatedSizeInBytes = circuitBreakerStat.getEstimated();
                    allCircuitBreakerStatsBean.fieldDataLimitMaximumSizeInBytes = circuitBreakerStat.getLimit();
                    allCircuitBreakerStatsBean.fieldDataOverhead = circuitBreakerStat.getOverhead();
                    allCircuitBreakerStatsBean.fieldDataTrippedCount = circuitBreakerStat.getTrippedCount();
                }
                if (CircuitBreaker.REQUEST.equals(circuitBreakerStat.getName())) {
                    allCircuitBreakerStatsBean.requestEstimatedSizeInBytes = circuitBreakerStat.getEstimated();
                    allCircuitBreakerStatsBean.requestLimitMaximumSizeInBytes = circuitBreakerStat.getLimit();
                    allCircuitBreakerStatsBean.requestOverhead = circuitBreakerStat.getOverhead();
                    allCircuitBreakerStatsBean.requestTrippedCount = circuitBreakerStat.getTrippedCount();
                }
            }
        } catch (Exception e) {
            logger.warn("Failed to load circuit breaker stats data", e);
        }
        allCircuitBreakerStatsReporter.allCircuitBreakerStatsBean.set(allCircuitBreakerStatsBean);
    }
    /** Servo-facing view over the latest circuit-breaker sample. */
    public class Elasticsearch_AllCircuitBreakerStatsReporter {
        private final AtomicReference<AllCircuitBreakerStatsBean> allCircuitBreakerStatsBean;
        public Elasticsearch_AllCircuitBreakerStatsReporter() {
            allCircuitBreakerStatsBean = new AtomicReference<AllCircuitBreakerStatsBean>(new AllCircuitBreakerStatsBean());
        }
        @Monitor(name = "field_data_estimated_size_in_bytes", type = DataSourceType.GAUGE)
        public long getFieldDataEstimatedSizeInBytes() {
            return allCircuitBreakerStatsBean.get().fieldDataEstimatedSizeInBytes;
        }
        @Monitor(name = "field_data_limit_maximum_size_in_bytes", type = DataSourceType.GAUGE)
        public long getFieldDataLimitMaximumSizeInBytes() {
            return allCircuitBreakerStatsBean.get().fieldDataLimitMaximumSizeInBytes;
        }
        // NOTE(review): tripped counts are stored as long in the bean but reported
        // as double here — works via widening, but the types are inconsistent.
        @Monitor(name = "field_data_tripped_count", type = DataSourceType.GAUGE)
        public double getFieldDataTrippedCount() {
            return allCircuitBreakerStatsBean.get().fieldDataTrippedCount;
        }
        @Monitor(name = "field_data_overhead", type = DataSourceType.GAUGE)
        public double getFieldDataOverhead() {
            return allCircuitBreakerStatsBean.get().fieldDataOverhead;
        }
        @Monitor(name = "request_estimated_size_in_bytes", type = DataSourceType.GAUGE)
        public long getRequestEstimatedSizeInBytes() {
            return allCircuitBreakerStatsBean.get().requestEstimatedSizeInBytes;
        }
        @Monitor(name = "request_limit_maximum_size_in_bytes", type = DataSourceType.GAUGE)
        public long getRequestLimitMaximumSizeInBytes() {
            return allCircuitBreakerStatsBean.get().requestLimitMaximumSizeInBytes;
        }
        @Monitor(name = "request_tripped_count", type = DataSourceType.GAUGE)
        public double getRequestTrippedCount() {
            return allCircuitBreakerStatsBean.get().requestTrippedCount;
        }
        @Monitor(name = "request_overhead", type = DataSourceType.GAUGE)
        public double getRequestOverhead() {
            return allCircuitBreakerStatsBean.get().requestOverhead;
        }
    }
    // One sampling of the two tracked breakers; all fields default to 0.
    private static class AllCircuitBreakerStatsBean {
        private long fieldDataEstimatedSizeInBytes;
        private long fieldDataLimitMaximumSizeInBytes;
        private long fieldDataTrippedCount;
        private double fieldDataOverhead;
        private long requestEstimatedSizeInBytes;
        private long requestLimitMaximumSizeInBytes;
        private long requestTrippedCount;
        private double requestOverhead;
    }
    // Runs once a minute.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }
    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
| 5,567 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/ProcessStatsMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.monitor.process.ProcessStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class ProcessStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(ProcessStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_ProcessStatsMonitor";
    private final Elasticsearch_ProcessStatsReporter processStatsReporter;
    @Inject
    public ProcessStatsMonitor(IConfiguration config) {
        super(config);
        processStatsReporter = new Elasticsearch_ProcessStatsReporter();
        Monitors.registerObject(processStatsReporter);
    }
    /**
     * Samples process-level stats (virtual memory, CPU, open file descriptors)
     * from the first node of a nodes-stats call and publishes them via Servo.
     * Returns early — keeping the previously published bean — when Elasticsearch
     * is down or stats are unavailable.
     */
    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }
        ProcessStatsBean processStatsBean = new ProcessStatsBean();
        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
            // Only the first node's stats are used — presumably the local node; TODO confirm
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }
            if (nodeStats == null) {
                logger.info("Process stats are not available (node stats is not available)");
                return;
            }
            ProcessStats processStats = nodeStats.getProcess();
            if (processStats == null) {
                logger.info("Process stats are not available");
                return;
            }
            //Memory
            processStatsBean.totalVirtualInBytes = processStats.getMem().getTotalVirtual().getBytes();
            //CPU
            processStatsBean.cpuPercent = processStats.getCpu().getPercent();
            processStatsBean.totalInMillis = processStats.getCpu().getTotal().getMillis();
            //Open file descriptors
            processStatsBean.openFileDescriptors = processStats.getOpenFileDescriptors();
            //Timestamp
            processStatsBean.cpuTimestamp = processStats.getTimestamp();
        } catch (Exception e) {
            logger.warn("Failed to load process stats data", e);
        }
        processStatsReporter.processStatsBean.set(processStatsBean);
    }
    /** Servo-facing view over the latest process-stats sample. */
    public class Elasticsearch_ProcessStatsReporter {
        private final AtomicReference<ProcessStatsBean> processStatsBean;
        public Elasticsearch_ProcessStatsReporter() {
            processStatsBean = new AtomicReference<ProcessStatsBean>(new ProcessStatsBean());
        }
        // NOTE(review): residentInBytes is never assigned in execute(), so this
        // metric always reports 0.
        @Monitor(name = "resident_in_bytes", type = DataSourceType.GAUGE)
        public long getResidentInBytes() {
            return processStatsBean.get().residentInBytes;
        }
        // NOTE(review): shareInBytes is never assigned in execute() — always 0.
        @Monitor(name = "share_in_bytes", type = DataSourceType.GAUGE)
        public long getShareInBytes() {
            return processStatsBean.get().shareInBytes;
        }
        @Monitor(name = "total_virtual_in_bytes", type = DataSourceType.GAUGE)
        public long getTotalVirtualInBytes() {
            return processStatsBean.get().totalVirtualInBytes;
        }
        @Monitor(name = "cpu_percent", type = DataSourceType.GAUGE)
        public short getCpuPercent() {
            return processStatsBean.get().cpuPercent;
        }
        // NOTE(review): sysInMillis is never assigned in execute() — always 0.
        @Monitor(name = "sys_in_millis", type = DataSourceType.GAUGE)
        public long getSysInMillis() {
            return processStatsBean.get().sysInMillis;
        }
        // NOTE(review): userInMillis is never assigned in execute() — always 0.
        @Monitor(name = "user_in_millis", type = DataSourceType.GAUGE)
        public long getUserInMillis() {
            return processStatsBean.get().userInMillis;
        }
        @Monitor(name = "total_in_millis", type = DataSourceType.GAUGE)
        public long getTotalInMillis() {
            return processStatsBean.get().totalInMillis;
        }
        // Bean field is long; reported as double (widening conversion).
        @Monitor(name = "open_file_descriptors", type = DataSourceType.GAUGE)
        public double getOpenFileDescriptors() {
            return processStatsBean.get().openFileDescriptors;
        }
        @Monitor(name = "cpu_timestamp", type = DataSourceType.GAUGE)
        public long getCpuTimestamp() {
            return processStatsBean.get().cpuTimestamp;
        }
    }
    // One sampling of the process stats; all fields default to 0.
    private static class ProcessStatsBean {
        private long residentInBytes;
        private long shareInBytes;
        private long totalVirtualInBytes;
        private short cpuPercent;
        private long sysInMillis;
        private long userInMillis;
        private long totalInMillis;
        private long openFileDescriptors;
        private long cpuTimestamp;
    }
    // Runs once a minute.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }
    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
| 5,568 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/FsStatsMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.monitor.fs.FsInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class FsStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(FsStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_FsStatsMonitor";
    private final Elasticsearch_FsStatsReporter fsStatsReporter;

    @Inject
    public FsStatsMonitor(IConfiguration config) {
        super(config);
        fsStatsReporter = new Elasticsearch_FsStatsReporter();
        Monitors.registerObject(fsStatsReporter);
    }

    /**
     * Samples aggregate file-system stats (total/free/available bytes and derived
     * available-disk percentage) from the first node of a nodes-stats call and
     * publishes them via the Servo reporter. Returns early — keeping the previously
     * published bean — when Elasticsearch is down or stats are unavailable.
     */
    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        FsStatsBean fsStatsBean = new FsStatsBean();
        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
            // Only the first node's stats are used — presumably the local node; TODO confirm
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }
            if (nodeStats == null) {
                logger.info("File system info is not available (node stats are not available)");
                return;
            }
            FsInfo fsInfo = nodeStats.getFs();
            if (fsInfo == null) {
                logger.info("File system info is not available");
                return;
            }
            fsStatsBean.total = fsInfo.getTotal().getTotal().getBytes();
            fsStatsBean.free = fsInfo.getTotal().getFree().getBytes();
            fsStatsBean.available = fsInfo.getTotal().getAvailable().getBytes();
            // BUG FIX: guard against ArithmeticException when the reported total is 0
            // (previously this divided by fsStatsBean.total unconditionally)
            fsStatsBean.availableDiskPercent = fsStatsBean.total > 0
                    ? (fsStatsBean.available * 100) / fsStatsBean.total
                    : 0;
        } catch (Exception e) {
            logger.warn("Failed to load file system stats data", e);
        }
        fsStatsReporter.fsStatsBean.set(fsStatsBean);
    }

    /** Servo-facing view over the latest file-system sample. */
    public class Elasticsearch_FsStatsReporter {
        private final AtomicReference<FsStatsBean> fsStatsBean;

        public Elasticsearch_FsStatsReporter() {
            fsStatsBean = new AtomicReference<FsStatsBean>(new FsStatsBean());
        }

        @Monitor(name = "total_bytes", type = DataSourceType.GAUGE)
        public long getTotalBytes() {
            return fsStatsBean.get().total;
        }

        @Monitor(name = "free_bytes", type = DataSourceType.GAUGE)
        public long getFreeBytes() {
            return fsStatsBean.get().free;
        }

        @Monitor(name = "available_bytes", type = DataSourceType.GAUGE)
        public long getAvailableBytes() {
            return fsStatsBean.get().available;
        }

        // NOTE(review): method name has a typo ("geDiskReads") — kept unchanged for
        // interface compatibility; Servo publishes by the annotation name anyway.
        // diskReads is never assigned in execute(), so this metric always reports 0.
        @Monitor(name = "disk_reads", type = DataSourceType.GAUGE)
        public long geDiskReads() {
            return fsStatsBean.get().diskReads;
        }

        // NOTE(review): diskWrites is never assigned in execute() — always 0.
        @Monitor(name = "disk_writes", type = DataSourceType.GAUGE)
        public long getDiskWrites() {
            return fsStatsBean.get().diskWrites;
        }

        // NOTE(review): diskReadBytes is never assigned in execute() — always 0.
        @Monitor(name = "disk_read_bytes", type = DataSourceType.GAUGE)
        public long getDiskReadBytes() {
            return fsStatsBean.get().diskReadBytes;
        }

        // NOTE(review): diskWriteBytes is never assigned in execute() — always 0.
        @Monitor(name = "disk_write_bytes", type = DataSourceType.GAUGE)
        public long getDiskWriteBytes() {
            return fsStatsBean.get().diskWriteBytes;
        }

        // NOTE(review): diskQueue is never assigned in execute() — always 0.
        @Monitor(name = "disk_queue", type = DataSourceType.GAUGE)
        public double getDiskQueue() {
            return fsStatsBean.get().diskQueue;
        }

        // NOTE(review): diskServiceTime is never assigned in execute() — always 0.
        @Monitor(name = "disk_service_time", type = DataSourceType.GAUGE)
        public double getDiskServiceTime() {
            return fsStatsBean.get().diskServiceTime;
        }

        // Integer percentage (0-100) of available disk, 0 when total is unknown.
        @Monitor(name = "available_disk_percent", type = DataSourceType.GAUGE)
        public long getAvailableDiskPercent() {
            return fsStatsBean.get().availableDiskPercent;
        }
    }

    // One sampling of the file-system stats; all fields default to 0.
    private static class FsStatsBean {
        private long total;
        private long free;
        private long available;
        private long diskReads;
        private long diskWrites;
        private long diskReadBytes;
        private long diskWriteBytes;
        private double diskQueue;
        private double diskServiceTime;
        private long availableDiskPercent;
    }

    // Runs once a minute.
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
| 5,569 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/HealthMonitor.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.raigad.utils.ElasticsearchUtils;
import com.netflix.raigad.utils.HttpModule;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.unit.TimeValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
public class HealthMonitor extends Task {
private static final Logger logger = LoggerFactory.getLogger(HealthMonitor.class);
public static final String METRIC_NAME = "Elasticsearch_HealthMonitor";
private final Elasticsearch_HealthReporter healthReporter;
private final InstanceManager instanceManager;
private static TimeValue MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(60);
private final DiscoveryClient discoveryClient;
private final HttpModule httpModule;
@Inject
public HealthMonitor(IConfiguration config, InstanceManager instanceManager, HttpModule httpModule) {
super(config);
this.instanceManager = instanceManager;
this.httpModule = httpModule;
healthReporter = new Elasticsearch_HealthReporter();
discoveryClient = DiscoveryManager.getInstance().getDiscoveryClient();
Monitors.registerObject(healthReporter);
}
@Override
public void execute() throws Exception {
// Only start monitoring if Elasticsearch is started
if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
String exceptionMsg = "Elasticsearch is not running, check back again later";
logger.info(exceptionMsg);
return;
}
// In case we configured only the master node to report metrics and this node is not a master - bail out
if (config.reportMetricsFromMasterOnly() && !ElasticsearchUtils.amIMasterNode(config, httpModule)) {
return;
}
HealthBean healthBean = new HealthBean();
try {
Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();
ClusterHealthStatus clusterHealthStatus = esTransportClient.admin().cluster().prepareHealth().setTimeout(MASTER_NODE_TIMEOUT).execute().get().getStatus();
ClusterHealthResponse clusterHealthResponse = esTransportClient.admin().cluster().prepareHealth().execute().actionGet(MASTER_NODE_TIMEOUT);
if (clusterHealthStatus == null) {
logger.info("ClusterHealthStatus is null, hence returning (no health).");
resetHealthStats(healthBean);
return;
}
//Check if status = GREEN, YELLOW or RED
if (clusterHealthStatus.name().equalsIgnoreCase("GREEN")) {
healthBean.greenorredstatus = 0;
healthBean.greenoryellowstatus = 0;
} else if (clusterHealthStatus.name().equalsIgnoreCase("YELLOW")) {
healthBean.greenoryellowstatus = 1;
healthBean.greenorredstatus = 0;
} else if (clusterHealthStatus.name().equalsIgnoreCase("RED")) {
healthBean.greenorredstatus = 1;
healthBean.greenoryellowstatus = 0;
}
if (config.isNodeMismatchWithDiscoveryEnabled()) {
// Check if there is node mismatch between discovery and ES
healthBean.nodematch = (clusterHealthResponse.getNumberOfNodes() == instanceManager.getAllInstances().size()) ? 0 : 1;
} else {
healthBean.nodematch = (clusterHealthResponse.getNumberOfNodes() == config.getDesiredNumberOfNodesInCluster()) ? 0 : 1;
}
if (config.isEurekaHealthCheckEnabled()) {
healthBean.eurekanodematch = (clusterHealthResponse.getNumberOfNodes() == discoveryClient.getApplication(config.getAppName()).getInstances().size()) ? 0 : 1;
}
} catch (Exception e) {
resetHealthStats(healthBean);
logger.warn("Failed to load cluster health status", e);
}
healthReporter.healthBean.set(healthBean);
}
/**
 * Servo metrics reporter exposing the most recent cluster health snapshot.
 * Gauges are read from the {@code HealthBean} swapped in atomically by the
 * monitor's execute() pass; -1 on any gauge means "unknown / not yet polled".
 */
public class Elasticsearch_HealthReporter {
// Latest snapshot; replaced wholesale after each poll so readers never see a partial update.
private final AtomicReference<HealthBean> healthBean;
public Elasticsearch_HealthReporter() {
healthBean = new AtomicReference<HealthBean>(new HealthBean());
}
// 1 when the cluster status is RED, 0 when GREEN or YELLOW, -1 when unknown.
@Monitor(name = "es_healthstatus_greenorred", type = DataSourceType.GAUGE)
public int getEsHealthstatusGreenorred() {
return healthBean.get().greenorredstatus;
}
// 1 when the cluster status is YELLOW, 0 when GREEN or RED, -1 when unknown.
@Monitor(name = "es_healthstatus_greenoryellow", type = DataSourceType.GAUGE)
public int getEsHealthstatusGreenoryellow() {
return healthBean.get().greenoryellowstatus;
}
// 0 when the ES node count matches the expected count (discovery or configured), 1 on mismatch, -1 unknown.
@Monitor(name = "es_nodematchstatus", type = DataSourceType.GAUGE)
public int getEsNodematchstatus() {
return healthBean.get().nodematch;
}
// 0 when the ES node count matches Eureka's instance count, 1 on mismatch, -1 unknown/disabled.
@Monitor(name = "es_eurekanodematchstatus", type = DataSourceType.GAUGE)
public int getEsEurekanodematchstatus() {
return healthBean.get().eurekanodematch;
}
}
// Mutable holder for one health poll. All fields default to -1, the
// "unknown" sentinel also restored by resetHealthStats() on failure.
private static class HealthBean {
// 1 = RED, 0 = GREEN/YELLOW, -1 = unknown
private int greenorredstatus = -1;
// 1 = YELLOW, 0 = GREEN/RED, -1 = unknown
private int greenoryellowstatus = -1;
// 1 = node-count mismatch vs. discovery/config, 0 = match, -1 = unknown
private int nodematch = -1;
// 1 = node-count mismatch vs. Eureka, 0 = match, -1 = unknown/disabled
private int eurekanodematch = -1;
}
/**
 * Builds the scheduler timer for this task.
 *
 * @param name task name the timer is registered under
 * @return a {@code SimpleTimer} firing once every 60 seconds
 */
public static TaskTimer getTimer(String name) {
    // SimpleTimer period is expressed in milliseconds.
    int periodMillis = 60 * 1000;
    return new SimpleTimer(name, periodMillis);
}
/** @return the task name ({@code METRIC_NAME}) used for scheduler registration. */
@Override
public String getName() {
return METRIC_NAME;
}
/**
 * Marks every gauge in the given bean as "unknown" (-1); called when
 * cluster health could not be obtained.
 */
private void resetHealthStats(HealthBean bean) {
    final int unknown = -1;
    bean.greenorredstatus = unknown;
    bean.greenoryellowstatus = unknown;
    bean.nodematch = unknown;
    bean.eurekanodematch = unknown;
}
}
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.http.HttpStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Scheduled task that samples Elasticsearch HTTP connection statistics
 * (currently open server channels and the running total of opened channels)
 * from the local node stats and publishes them as Servo gauges.
 */
@Singleton
public class HttpStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(HttpStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_HttpStatsMonitor";

    // Servo-registered reporter holding the most recent HTTP stats snapshot.
    private final Elasticsearch_HttpStatsReporter httpStatsReporter;

    @Inject
    public HttpStatsMonitor(IConfiguration config) {
        super(config);
        httpStatsReporter = new Elasticsearch_HttpStatsReporter();
        Monitors.registerObject(httpStatsReporter);
    }

    /**
     * Polls node stats via the transport client and publishes the HTTP channel
     * counters. On a fetch error a zeroed snapshot is published; when stats
     * are simply not yet available the previous snapshot is left in place.
     */
    @Override
    public void execute() throws Exception {
        // Only monitor once the Elasticsearch process is actually running.
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            logger.info("Elasticsearch is not yet started, check back again later");
            return;
        }
        HttpStatsBean httpStatsBean = new HttpStatsBean();
        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            if (nodeStatsList.isEmpty()) {
                logger.info("HTTP stats is not available (node stats are not available)");
                return;
            }
            // Use the first node entry (original behavior; presumably the local node —
            // TODO confirm against ElasticsearchTransportClient.getNodesStatsResponse).
            HttpStats httpStats = nodeStatsList.get(0).getHttp();
            if (httpStats == null) {
                logger.info("HTTP stats is not available");
                return;
            }
            httpStatsBean.serverOpen = httpStats.getServerOpen();
            httpStatsBean.totalOpen = httpStats.getTotalOpen();
        } catch (Exception e) {
            logger.warn("Failed to load HTTP stats data", e);
        }
        httpStatsReporter.httpStatsBean.set(httpStatsBean);
    }

    /**
     * Servo reporter exposing the latest HTTP stats snapshot as gauges.
     */
    public class Elasticsearch_HttpStatsReporter {
        // Latest snapshot; replaced atomically so readers never see a partial update.
        private final AtomicReference<HttpStatsBean> httpStatsBean;

        public Elasticsearch_HttpStatsReporter() {
            httpStatsBean = new AtomicReference<>(new HttpStatsBean());
        }

        // Number of currently open HTTP server channels.
        @Monitor(name = "server_open", type = DataSourceType.GAUGE)
        public long getServerOpen() {
            return httpStatsBean.get().serverOpen;
        }

        // Cumulative count of HTTP channels opened since node start.
        @Monitor(name = "total_open", type = DataSourceType.GAUGE)
        public long getTotalOpen() {
            return httpStatsBean.get().totalOpen;
        }
    }

    // Mutable holder for one poll of HTTP stats (defaults to zero).
    private static class HttpStatsBean {
        private long serverOpen;
        private long totalOpen;
    }

    /**
     * @param name task name the timer is registered under
     * @return a {@code SimpleTimer} firing once every 60 seconds
     */
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    /** @return the task name ({@code METRIC_NAME}) used for scheduler registration. */
    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.startup;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.aws.SetVPCSecurityGroupID;
import com.netflix.raigad.aws.UpdateSecuritySettings;
import com.netflix.raigad.aws.UpdateTribeSecuritySettings;
import com.netflix.raigad.backup.RestoreBackupManager;
import com.netflix.raigad.backup.SnapshotBackupManager;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.defaultimpl.IElasticsearchProcess;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.indexmanagement.ElasticsearchIndexManager;
import com.netflix.raigad.monitoring.*;
import com.netflix.raigad.scheduler.RaigadScheduler;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.HttpModule;
import com.netflix.raigad.utils.Sleeper;
import com.netflix.raigad.utils.TuneElasticsearch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Start all tasks here: Property update task, Backup task, Restore task, Incremental backup
*/
@Singleton
public class RaigadServer {
private static final Logger logger = LoggerFactory.getLogger(RaigadServer.class);
private static final int ES_MONITORING_INITIAL_DELAY = 10;
private static final int ES_SNAPSHOT_INITIAL_DELAY = 100;
private static final int ES_HEALTH_MONITOR_DELAY = 600;
private static final int ES_NODE_HEALTH_MONITOR_DELAY = 10;
private final RaigadScheduler scheduler;
private final IConfiguration config;
private final Sleeper sleeper;
private final IElasticsearchProcess esProcess;
private final InstanceManager instanceManager;
private final ElasticsearchIndexManager esIndexManager;
private final SnapshotBackupManager snapshotBackupManager;
private final HttpModule httpModule;
private final SetVPCSecurityGroupID setVPCSecurityGroupID;
/**
 * All collaborators are supplied by Guice; this constructor only stores
 * references — scheduling and process startup happen in initialize().
 */
@Inject
public RaigadServer(IConfiguration config,
RaigadScheduler scheduler,
HttpModule httpModule,
IElasticsearchProcess esProcess,
Sleeper sleeper,
InstanceManager instanceManager,
ElasticsearchIndexManager esIndexManager,
SnapshotBackupManager snapshotBackupManager,
SetVPCSecurityGroupID setVPCSecurityGroupID) {
this.config = config;
this.scheduler = scheduler;
this.httpModule = httpModule;
this.esProcess = esProcess;
this.sleeper = sleeper;
this.instanceManager = instanceManager;
this.esIndexManager = esIndexManager;
this.snapshotBackupManager = snapshotBackupManager;
this.setVPCSecurityGroupID = setVPCSecurityGroupID;
}
/**
 * Boots the Raigad server: starts the scheduler, opens security groups (when
 * applicable), starts Elasticsearch, and schedules the backup/index/health
 * and statistics-monitoring tasks. Skips everything when the local instance
 * is marked out of service. Ordering of the steps is significant and is
 * preserved by the private helpers below.
 */
public void initialize() throws Exception {
    // Check if it's really needed
    if (instanceManager.getInstance().isOutOfService()) {
        return;
    }
    logger.info("Initializing Raigad server now...");
    // Start to schedule jobs
    scheduler.start();
    if (!config.isLocalModeEnabled()) {
        updateSecuritySettings();
    }
    // Tune Elasticsearch
    scheduler.runTaskNow(TuneElasticsearch.class);
    startElasticsearch();
    scheduleBackupAndHealthTasks();
    scheduleStatsMonitors();
}

/**
 * Opens security groups for tribe or multi-DC deployments, then schedules
 * the recurring security-settings update task. Sleeps 60s after the first
 * update so the SG change can propagate before anything depends on it.
 */
private void updateSecuritySettings() throws Exception {
    if (config.amITribeNode()) {
        logger.info("Updating security setting for the tribe node");
        if (config.isDeployedInVPC()) {
            logger.info("Setting Security Group ID (VPC)");
            setVPCSecurityGroupID.execute();
        }
        // Update security settings
        scheduler.runTaskNow(UpdateTribeSecuritySettings.class);
        // Sleep for 60 seconds for the SG update to happen
        if (UpdateTribeSecuritySettings.firstTimeUpdated) {
            sleeper.sleep(60 * 1000);
        }
        scheduler.addTask(UpdateTribeSecuritySettings.JOB_NAME,
                UpdateTribeSecuritySettings.class,
                UpdateTribeSecuritySettings.getTimer(instanceManager));
    } else if (config.isSecurityGroupInMultiDC()) {
        logger.info("Updating security setting");
        if (config.isDeployedInVPC()) {
            logger.info("Setting Security Group ID (VPC)");
            setVPCSecurityGroupID.execute();
        }
        if (config.amISourceClusterForTribeNode()) {
            // Update security settings
            scheduler.runTaskNow(UpdateSecuritySettings.class);
            // Sleep for 60 seconds for the SG update to happen
            if (UpdateSecuritySettings.firstTimeUpdated) {
                sleeper.sleep(60 * 1000);
            }
            scheduler.addTask(UpdateSecuritySettings.JOB_NAME,
                    UpdateSecuritySettings.class,
                    UpdateSecuritySettings.getTimer(instanceManager));
        }
    }
}

/**
 * Starts the Elasticsearch process (unless manual start is configured) and,
 * when restore is enabled, schedules the restore task.
 */
private void startElasticsearch() throws Exception {
    logger.info("Trying to start Elasticsearch now...");
    if (!config.doesElasticsearchStartManually()) {
        // Start Elasticsearch
        esProcess.start();
        if (config.isRestoreEnabled()) {
            scheduler.addTaskWithDelay(RestoreBackupManager.JOBNAME,
                    RestoreBackupManager.class,
                    RestoreBackupManager.getTimer(config),
                    config.getRestoreTaskInitialDelayInSeconds());
        }
    } else {
        logger.info("config.doesElasticsearchStartManually() is set to True," +
                "hence Elasticsearch needs to be started manually. " +
                "Restore task needs to be started manually as well (if needed).");
    }
}

/**
 * Schedules the ES process watchdog plus snapshot/index-management/health
 * tasks. In ASG-based dedicated deployments the snapshot and index tasks run
 * only on master nodes; non-masters still run the health monitor unless
 * metrics are restricted to masters.
 */
private void scheduleBackupAndHealthTasks() throws Exception {
    // Run the delayed task (after 10 seconds) to monitor the Elasticsearch running process
    scheduler.addTaskWithDelay(ElasticsearchProcessMonitor.JOB_NAME, ElasticsearchProcessMonitor.class, ElasticsearchProcessMonitor.getTimer(), ES_MONITORING_INITIAL_DELAY);
    if (config.isAsgBasedDedicatedDeployment()) {
        if (config.getASGName().toLowerCase().contains("master")) {
            // Run Snapshot task only on Master Nodes
            scheduler.addTaskWithDelay(SnapshotBackupManager.JOBNAME, SnapshotBackupManager.class, SnapshotBackupManager.getTimer(config), ES_SNAPSHOT_INITIAL_DELAY);
            // Run Index Management task only on Master Nodes
            scheduler.addTaskWithDelay(ElasticsearchIndexManager.JOB_NAME, ElasticsearchIndexManager.class, ElasticsearchIndexManager.getTimer(config), config.getAutoCreateIndexInitialStartDelaySeconds());
            scheduler.addTaskWithDelay(HealthMonitor.METRIC_NAME, HealthMonitor.class, HealthMonitor.getTimer("HealthMonitor"), ES_HEALTH_MONITOR_DELAY);
        } else if (!config.reportMetricsFromMasterOnly()) {
            scheduler.addTaskWithDelay(HealthMonitor.METRIC_NAME, HealthMonitor.class, HealthMonitor.getTimer("HealthMonitor"), ES_HEALTH_MONITOR_DELAY);
        }
    } else {
        scheduler.addTaskWithDelay(SnapshotBackupManager.JOBNAME, SnapshotBackupManager.class, SnapshotBackupManager.getTimer(config), ES_SNAPSHOT_INITIAL_DELAY);
        scheduler.addTaskWithDelay(ElasticsearchIndexManager.JOB_NAME, ElasticsearchIndexManager.class, ElasticsearchIndexManager.getTimer(config), config.getAutoCreateIndexInitialStartDelaySeconds());
        scheduler.addTaskWithDelay(HealthMonitor.METRIC_NAME, HealthMonitor.class, HealthMonitor.getTimer("HealthMonitor"), ES_HEALTH_MONITOR_DELAY);
    }
}

/**
 * Schedules the recurring statistics-monitoring jobs (thread pool, transport,
 * indices, filesystem, JVM, OS, process, HTTP, circuit breakers, snapshot
 * backup, node health).
 */
private void scheduleStatsMonitors() throws Exception {
    scheduler.addTask(ThreadPoolStatsMonitor.METRIC_NAME, ThreadPoolStatsMonitor.class, ThreadPoolStatsMonitor.getTimer("ThreadPoolStatsMonitor"));
    scheduler.addTask(TransportStatsMonitor.METRIC_NAME, TransportStatsMonitor.class, TransportStatsMonitor.getTimer("TransportStatsMonitor"));
    scheduler.addTask(NodeIndicesStatsMonitor.METRIC_NAME, NodeIndicesStatsMonitor.class, NodeIndicesStatsMonitor.getTimer("NodeIndicesStatsMonitor"));
    scheduler.addTask(FsStatsMonitor.METRIC_NAME, FsStatsMonitor.class, FsStatsMonitor.getTimer("FsStatsMonitor"));
    // TODO: 2X: Determine if this is necessary and if yes find an alternative
    //scheduler.addTask(NetworkStatsMonitor.METRIC_NAME, NetworkStatsMonitor.class, NetworkStatsMonitor.getTimer("NetworkStatsMonitor"));
    scheduler.addTask(JvmStatsMonitor.METRIC_NAME, JvmStatsMonitor.class, JvmStatsMonitor.getTimer("JvmStatsMonitor"));
    scheduler.addTask(OsStatsMonitor.METRIC_NAME, OsStatsMonitor.class, OsStatsMonitor.getTimer("OsStatsMonitor"));
    scheduler.addTask(ProcessStatsMonitor.METRIC_NAME, ProcessStatsMonitor.class, ProcessStatsMonitor.getTimer("ProcessStatsMonitor"));
    scheduler.addTask(HttpStatsMonitor.METRIC_NAME, HttpStatsMonitor.class, HttpStatsMonitor.getTimer("HttpStatsMonitor"));
    scheduler.addTask(AllCircuitBreakerStatsMonitor.METRIC_NAME, AllCircuitBreakerStatsMonitor.class, AllCircuitBreakerStatsMonitor.getTimer("AllCircuitBreakerStatsMonitor"));
    scheduler.addTask(SnapshotBackupMonitor.METRIC_NAME, SnapshotBackupMonitor.class, SnapshotBackupMonitor.getTimer("SnapshotBackupMonitor"));
    scheduler.addTaskWithDelay(NodeHealthMonitor.METRIC_NAME, NodeHealthMonitor.class, NodeHealthMonitor.getTimer("NodeHealthMonitor"), ES_NODE_HEALTH_MONITOR_DELAY);
}
/** @return the instance manager backing discovery/identity decisions. */
public InstanceManager getInstanceManager() {
return instanceManager;
}
/** @return the scheduler driving all Raigad tasks. */
public RaigadScheduler getScheduler() {
return scheduler;
}
/** @return the active Raigad configuration. */
public IConfiguration getConfiguration() {
return config;
}
}
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup;
import com.google.inject.ImplementedBy;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Base class for snapshot repository implementations (bound to
 * {@link S3Repository} by default). Provides a shared check for whether a
 * repository is already registered in the cluster state metadata.
 */
@ImplementedBy(S3Repository.class)
public abstract class AbstractRepository {
    private static final Logger logger = LoggerFactory.getLogger(AbstractRepository.class);

    /** Repository backend types understood by this class. */
    public enum RepositoryType {
        s3, fs
    }

    protected final IConfiguration config;
    protected final AbstractRepositorySettingsParams repositorySettingsParams;

    @Inject
    protected AbstractRepository(IConfiguration config, @Named("s3") AbstractRepositorySettingsParams repositorySettingsParams) {
        this.config = config;
        this.repositorySettingsParams = repositorySettingsParams;
    }

    /**
     * Get Remote Repository Name
     */
    public abstract String getRemoteRepositoryName();

    /** Creates the snapshot repository if needed and returns its name. */
    public abstract String createOrGetSnapshotRepository() throws Exception;

    /** Creates a repository used for restoring from the given base path suffix. */
    public abstract void createRestoreRepository(String s3RepoName, String basePathSuffix) throws Exception;

    /**
     * Checks the cluster state metadata for a repository with the given name
     * and type (both compared case-insensitively).
     *
     * @param repositoryName repository name to look for
     * @param repositoryType expected repository type
     * @return true if a matching repository is registered; false otherwise, or
     *         if the cluster state could not be read
     */
    public boolean doesRepositoryExists(String repositoryName, RepositoryType repositoryType) {
        boolean doesRepoExists = false;
        // Parameterized logging avoids concatenation work when the level is disabled.
        logger.info("Checking if repository <{}> exists for type <{}>", repositoryName, repositoryType.name());
        try {
            Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();
            // Only the metadata section of the cluster state is needed here.
            ClusterStateResponse clusterStateResponse = esTransportClient.admin().cluster().prepareState().clear().setMetaData(true).get();
            MetaData metaData = clusterStateResponse.getState().getMetaData();
            RepositoriesMetaData repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
            if (repositoriesMetaData != null) {
                for (RepositoryMetaData repositoryMetaData : repositoriesMetaData.repositories()) {
                    if (repositoryMetaData.name().equalsIgnoreCase(repositoryName) && repositoryMetaData.type().equalsIgnoreCase(repositoryType.name())) {
                        doesRepoExists = true;
                        break;
                    }
                }
                if (config.isDebugEnabled()) {
                    for (RepositoryMetaData repositoryMetaData : repositoriesMetaData.repositories()) {
                        logger.debug("Repository <{}>", repositoryMetaData.name());
                    }
                }
            }
            if (doesRepoExists) {
                logger.info("Repository <{}> already exists", repositoryName);
            } else {
                logger.info("Repository <{}> does NOT exist", repositoryName);
            }
        } catch (Exception e) {
            logger.warn("Exception thrown while listing Snapshot Repositories", e);
        }
        return doesRepoExists;
    }
}
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.backup.exception.CreateRepositoryException;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.raigad.utils.SystemUtils;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* TODO: ADD following params to the repository
* The following settings are supported:
* <p>
* bucket: The name of the bucket to be used for snapshots (mandatory)
* region: The region where bucket is located, defaults to US Standard
* base_path: Specifies the path within bucket to repository data. Defaults to root directory
* access_key: The access key to use for authentication. Defaults to value of cloud.aws.access_key
* secret_key: The secret key to use for authentication. Defaults to value of cloud.aws.secret_key
* chunk_size: Big files can be broken down into chunks during the snapshotting if needed. The chunk size can be specified in bytes or by using size value notation, i.e. 1g, 10m, 5k. Defaults to 100m.
* compress: When set to true metadata files are stored in compressed format. This setting doesn't affect index files that are already compressed by default. Defaults to false.
* server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm. Defaults to false.
* max_retries: Number of retries in case of S3 errors. Defaults to 3.
*/
@Singleton
public class S3Repository extends AbstractRepository {
    private static final Logger logger = LoggerFactory.getLogger(S3Repository.class);

    // Repository names are date-stamped in UTC, one repository per day.
    private static final String S3_REPO_DATE_FORMAT = "yyyyMMdd";
    private static final DateTimeZone currentZone = DateTimeZone.UTC;

    // Always RepositoryType.s3 for this implementation.
    private final RepositoryType type;

    @Inject
    private S3Repository(IConfiguration config, AbstractRepositorySettingsParams repositorySettingsParams) {
        // The settings params are stored by the base class; a second private
        // field here previously shadowed the inherited one with the same object.
        super(config, repositorySettingsParams);
        this.type = RepositoryType.s3;
    }

    /**
     * Returns the snapshot repository name, registering the repository with
     * the cluster first when it does not exist yet.
     * <p>
     * Equivalent REST registration:
     * 0.0.0.0:9200/_snapshot/s3_repo
     * { "type": "s3",
     *   "settings": { "bucket": "us-east-1.es-test",
     *                 "base_path": "es_abc/20140410",
     *                 "region": "us-east-1" } }
     *
     * @throws CreateRepositoryException when registration fails
     */
    @Override
    public String createOrGetSnapshotRepository() throws Exception {
        String s3RepoName;
        try {
            s3RepoName = getRemoteRepositoryName();
            logger.info("Snapshot repository name : <" + s3RepoName + ">");
            // Set Snapshot Backup related parameters
            repositorySettingsParams.setBackupParams();
            // Create the repository only if it does not already exist
            if (!doesRepositoryExists(s3RepoName, getRepositoryType())) {
                createNewRepository(s3RepoName);
            }
        } catch (Exception e) {
            throw new CreateRepositoryException("Failed creating snapshot repository!", e);
        }
        return s3RepoName;
    }

    /**
     * Registers a repository pointing at an existing backup location so its
     * snapshots can be restored.
     *
     * @param s3RepoName     repository name to register
     * @param basePathSuffix suffix selecting the backup base path to restore from
     * @throws CreateRepositoryException when registration fails
     */
    @Override
    public void createRestoreRepository(String s3RepoName, String basePathSuffix) throws Exception {
        try {
            // Set restore related parameters
            repositorySettingsParams.setRestoreParams(basePathSuffix);
            createNewRepository(s3RepoName);
        } catch (Exception e) {
            throw new CreateRepositoryException("Failed creating restore repository!", e);
        }
    }

    /**
     * Registers the repository with the cluster and fails loudly when the
     * cluster does not acknowledge the request.
     */
    public void createNewRepository(String s3RepoName) throws Exception {
        Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();
        PutRepositoryResponse putRepositoryResponse = getPutRepositoryResponse(esTransportClient, s3RepoName);
        if (putRepositoryResponse.isAcknowledged()) {
            logger.info("Successfully created repository <" + s3RepoName + "> " + getRepoParamPrint());
        } else {
            throw new CreateRepositoryException("Failed creating repository failed <" + s3RepoName + "> " + getRepoParamPrint());
        }
    }

    /** @return today's date (UTC) formatted as yyyyMMdd — the repository name. */
    @Override
    public String getRemoteRepositoryName() {
        DateTime dateTime = new DateTime();
        DateTime dateTimeGmt = dateTime.withZone(currentZone);
        return SystemUtils.formatDate(dateTimeGmt, S3_REPO_DATE_FORMAT);
    }

    public RepositoryType getRepositoryType() {
        return type;
    }

    /** @return a human-readable dump of the bucket/base_path/region settings for logging. */
    public String getRepoParamPrint() {
        return "bucket: <" + repositorySettingsParams.getBucket() + "> " +
                "base_path: <" + repositorySettingsParams.getBase_path() + "> " +
                "region: <" + repositorySettingsParams.getRegion() + ">";
    }

    /**
     * Following method is isolated so that it helps in unit testing for mocking
     */
    public PutRepositoryResponse getPutRepositoryResponse(Client esTransportClient, String s3RepoName) {
        return esTransportClient.admin().cluster().preparePutRepository(s3RepoName)
                .setType(getRepositoryType().name()).setSettings(Settings.builder()
                        .put("base_path", repositorySettingsParams.getBase_path())
                        .put("region", repositorySettingsParams.getRegion())
                        .put("bucket", repositorySettingsParams.getBucket())
                ).get();
    }
}
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.CronTimer;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.*;
import com.netflix.servo.monitor.*;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Scheduled task that takes Elasticsearch snapshots of the configured indices
 * into the repository provided by {@link AbstractRepository}. Snapshots run
 * only on the elected master node.
 */
@Singleton
public class SnapshotBackupManager extends Task {
    private static final Logger logger = LoggerFactory.getLogger(SnapshotBackupManager.class);
    public static final String JOBNAME = "SnapshotBackupManager";

    private final AbstractRepository repository;
    private final HttpModule httpModule;

    // Counters of completed/failed snapshot runs, exposed via the getters below.
    private final AtomicInteger snapshotSuccess = new AtomicInteger(0);
    private final AtomicInteger snapshotFailure = new AtomicInteger(0);

    // NOTE(review): never read or written anywhere in this class — candidate for removal.
    private static final AtomicBoolean isSnapshotRunning = new AtomicBoolean(false);

    private static final DateTimeZone currentZone = DateTimeZone.UTC;
    private static final String S3_REPO_FOLDER_DATE_FORMAT = "yyyyMMddHHmm";
    private static final String COMMA_SEPARATOR = ",";

    // Servo timer tracking the wall-clock duration of each snapshot run.
    private static final Timer snapshotDuration = new BasicTimer(MonitorConfig.builder("snapshotDuration").withTag("class", "Elasticsearch_SnapshotBackupReporter").build(), TimeUnit.SECONDS);

    static {
        Monitors.registerObject(snapshotDuration);
    }

    @Inject
    public SnapshotBackupManager(IConfiguration config, @Named("s3") AbstractRepository repository, HttpModule httpModule) {
        super(config);
        this.repository = repository;
        this.httpModule = httpModule;
    }

    /**
     * Runs a snapshot backup when this node is the master, Elasticsearch is
     * running, and backups are enabled; otherwise logs why it skipped.
     * Any failure increments the failure counter.
     */
    @Override
    public void execute() {
        try {
            // Snapshots must be taken from the master node only.
            if (!ElasticsearchUtils.amIMasterNode(config, httpModule)) {
                if (config.isDebugEnabled()) {
                    logger.debug("Current node is not a Master Node yet, hence not running a Snapshot");
                }
                return;
            }
            // If Elasticsearch is started then only start Snapshot Backup
            if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
                logger.info("Elasticsearch is not yet started, hence not Starting Snapshot Operation");
                return;
            }
            logger.info("Current node is the Master Node.");
            if (!config.isSnapshotBackupEnabled()) {
                logger.info("Snapshot Backup is disabled, hence can not start Snapshot Backup.");
                return;
            }
            runSnapshotBackup();
        } catch (Exception e) {
            snapshotFailure.incrementAndGet();
            logger.warn("Exception thrown while running Snapshot Backup", e);
        }
    }

    /**
     * Creates (or reuses) the snapshot repository and takes the snapshot.
     * Blocks until the snapshot completes when waitForCompletionOfBackup()
     * is configured, and updates the success/failure counters by status.
     */
    public void runSnapshotBackup() throws Exception {
        // Create or Get Repository
        String repositoryName = repository.createOrGetSnapshotRepository();
        String snapshotName = getSnapshotName(config.getCommaSeparatedIndicesToBackup(), config.includeIndexNameInSnapshot());
        logger.info("Repository Name : <" + repositoryName + "> Snapshot Name : <" + snapshotName + "> Indices : <" + config.getCommaSeparatedIndicesToBackup() + "> \nRunning Snapshot now ... ");
        Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();
        Stopwatch snapshotTimer = snapshotDuration.start();
        // This is a blocking call. It'll wait until Snapshot is finished.
        CreateSnapshotResponse createSnapshotResponse = getCreateSnapshotResponse(esTransportClient, repositoryName, snapshotName);
        logger.info("Snapshot Status = " + createSnapshotResponse.status().toString());
        if (createSnapshotResponse.status() == RestStatus.OK) {
            //TODO Add Servo Monitoring so that it can be verified from dashboard
            printSnapshotDetails(createSnapshotResponse);
            snapshotSuccess.incrementAndGet();
        } else if (createSnapshotResponse.status() == RestStatus.INTERNAL_SERVER_ERROR) {
            //TODO Add Servo Monitoring so that it can be verified from dashboard
            logger.info("Snapshot Completely Failed");
            snapshotFailure.incrementAndGet();
        }
        // Stop the timer
        snapshotTimer.stop();
    }

    /**
     * Logs a human-readable summary of a completed snapshot (indices, timing,
     * shard counts, and per-shard failures if any).
     */
    //TODO: Map to Java Class and Create JSON
    public void printSnapshotDetails(CreateSnapshotResponse createSnapshotResponse) {
        StringBuilder builder = new StringBuilder();
        builder.append("Snapshot Details:");
        builder.append("\n\t Name = ").append(createSnapshotResponse.getSnapshotInfo().snapshotId().getName());
        builder.append("\n\t Indices : ");
        for (String index : createSnapshotResponse.getSnapshotInfo().indices()) {
            builder.append("\n\t\t Index = ").append(index);
        }
        builder.append("\n\t Start Time = ").append(createSnapshotResponse.getSnapshotInfo().startTime());
        builder.append("\n\t End Time = ").append(createSnapshotResponse.getSnapshotInfo().endTime());
        // Integer division: duration is reported in whole minutes.
        long minuteDuration = (createSnapshotResponse.getSnapshotInfo().endTime() - createSnapshotResponse.getSnapshotInfo().startTime()) / (1000 * 60);
        builder.append("\n\t Total Time Taken = ").append(minuteDuration).append(" Minutes");
        builder.append("\n\t Total Shards = ").append(createSnapshotResponse.getSnapshotInfo().totalShards());
        builder.append("\n\t Successful Shards = ").append(createSnapshotResponse.getSnapshotInfo().successfulShards());
        builder.append("\n\t Total Failed Shards = ").append(createSnapshotResponse.getSnapshotInfo().failedShards());
        if (createSnapshotResponse.getSnapshotInfo().failedShards() > 0) {
            for (SnapshotShardFailure failedShard : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
                builder.append("\n\t Failed Shards : ");
                builder.append("\n\t\t Index = ").append(failedShard.index());
                builder.append("\n\t\t Shard Id = ").append(failedShard.shardId());
                builder.append("\n\t\t Node Id = ").append(failedShard.nodeId());
                builder.append("\n\t\t Reason = ").append(failedShard.reason());
            }
        }
        logger.info(builder.toString());
    }

    /**
     * Hourly-snapshot mode uses a periodic timer; otherwise the backup runs
     * daily at the configured hour via cron.
     * CronTimer(hour, 1, 0) — confirm minute/second argument semantics against CronTimer.
     */
    public static TaskTimer getTimer(IConfiguration config) {
        if (config.isHourlySnapshotEnabled()) {
            return new SimpleTimer(JOBNAME, config.getBackupCronTimerInSeconds() * 1000);
        }
        int hour = config.getBackupHour();
        return new CronTimer(hour, 1, 0);
    }

    /** @return the task name ({@code JOBNAME}) used for scheduler registration. */
    @Override
    public String getName() {
        return JOBNAME;
    }

    /**
     * Builds the snapshot name: optionally the index list (commas replaced by
     * underscores, or the literal "all"), followed by a yyyyMMddHHmm UTC stamp.
     */
    public String getSnapshotName(String indices, boolean includeIndexNameInSnapshot) {
        StringBuilder snapshotName = new StringBuilder();
        if (includeIndexNameInSnapshot) {
            String indexName = indices.equalsIgnoreCase("all") ? "all" : StringUtils.replace(indices, ",", "_");
            snapshotName.append(indexName).append("_");
        }
        DateTime dt = new DateTime();
        DateTime dtGmt = dt.withZone(currentZone);
        String snapshotDate = SystemUtils.formatDate(dtGmt, S3_REPO_FOLDER_DATE_FORMAT);
        snapshotName.append(snapshotDate);
        return snapshotName.toString();
    }

    /** @return number of snapshot runs that completed with status OK. */
    public int getNumSnapshotSuccess() {
        return snapshotSuccess.get();
    }

    /** @return number of snapshot runs that failed (error status or exception). */
    public int getNumSnapshotFailure() {
        return snapshotFailure.get();
    }

    /**
     * Issues the (optionally blocking) create-snapshot request. Isolated so
     * tests can mock the transport-client interaction.
     */
    public CreateSnapshotResponse getCreateSnapshotResponse(Client esTransportClient, String repositoryName, String snapshotName) {
        return esTransportClient.admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName)
                .setWaitForCompletion(config.waitForCompletionOfBackup())
                .setIndices(config.getCommaSeparatedIndicesToBackup().split(COMMA_SEPARATOR))
                .setIncludeGlobalState(config.includeGlobalStateDuringBackup())
                .setPartial(config.partiallyBackupIndices()).get();
    }

    // A non-blocking variant is possible via cluster().createSnapshot(request, ActionListener<CreateSnapshotResponse>)
    // instead of the blocking prepareCreateSnapshot(...).get() used above.
}
| 5,575 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/RestoreBackupManager.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.raigad.backup.exception.RestoreBackupException;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.raigad.utils.ElasticsearchUtils;
import com.netflix.raigad.utils.HttpModule;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.rest.RestStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * Scheduled task that restores an Elasticsearch snapshot from an S3-backed
 * repository. Only runs on the active master node. Repository, snapshot, and
 * index selection each fall back to configuration when not passed explicitly.
 */
@Singleton
public class RestoreBackupManager extends Task {
    private static final Logger logger = LoggerFactory.getLogger(RestoreBackupManager.class);
    public static String JOBNAME = "RestoreBackupManager";
    private final AbstractRepository repository;
    private final HttpModule httpModule;
    // NOTE(review): never read or written inside this class — looks like dead state;
    // confirm no external (e.g. reflective/JMX) use before removing.
    private static final AtomicBoolean isRestoreRunning = new AtomicBoolean(false);
    private static final String ALL_INDICES_TAG = "_all";
    private static final String SUFFIX_SEPARATOR_TAG = "-";
    private static final String COMMA_SEPARATOR = ",";
    @Inject
    public RestoreBackupManager(IConfiguration config, @Named("s3") AbstractRepository repository, HttpModule httpModule) {
        super(config);
        this.repository = repository;
        this.httpModule = httpModule;
    }
    /**
     * Scheduler entry point: verifies this node is the active master and that
     * Elasticsearch is up, then runs a restore using configured parameters.
     * All exceptions are caught and logged so the scheduler keeps running.
     */
    @Override
    public void execute() {
        try {
            //Confirm if Current Node is a Master Node
            if (ElasticsearchUtils.amIMasterNode(config, httpModule)) {
                // If Elasticsearch is started then only start Snapshot Backup
                if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
                    String exceptionMsg = "Elasticsearch is not yet started, hence not Starting Restore Operation";
                    logger.info(exceptionMsg);
                    return;
                }
                logger.info("Current node is the Master Node. Running Restore now ...");
                //TODO: Add Config properties for Rename Pattern and Rename Replacement
                runRestore(config.getRestoreRepositoryName(),
                        config.getRestoreRepositoryType(),
                        config.getRestoreSnapshotName(),
                        config.getCommaSeparatedIndicesToRestore(), null, null);
            } else {
                logger.info("Current node is not a Master Node yet, hence not running a Restore");
            }
        } catch (Exception e) {
            logger.warn("Exception thrown while running Restore Backup", e);
        }
    }
    /**
     * Runs a snapshot restore. Every parameter may be blank/null, in which case the
     * corresponding configuration value is used instead.
     *
     * @param sourceRepositoryName repository the snapshot was taken into; also used as
     *                             the restore base-path suffix
     * @param repositoryType       repository type (defaults to "s3" when blank)
     * @param snapshotName         snapshot to restore; when blank the lexicographically
     *                             latest available snapshot is used
     * @param indices              comma-separated indices; blank or "_all" restores everything
     * @param renamePattern        optional regex applied to restored index names
     * @param renameReplacement    optional replacement for {@code renamePattern}
     * @throws Exception on connection failure, missing repository name, or no available snapshots
     */
    public void runRestore(String sourceRepositoryName, String repositoryType, String snapshotName, String indices, String renamePattern, String renameReplacement) throws Exception {
        Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();
        // Get Repository Name : This will serve as BasePath Suffix
        String sourceRepoName = StringUtils.isBlank(sourceRepositoryName) ? config.getRestoreRepositoryName() : sourceRepositoryName;
        if (StringUtils.isBlank(sourceRepoName))
            throw new RestoreBackupException("Repository Name is Null or Empty");
        //Attach suffix to the repository name so that it does not conflict with Snapshot Repository name
        String restoreRepositoryName = sourceRepoName + SUFFIX_SEPARATOR_TAG + config.getRestoreSourceClusterName();
        String repoType = StringUtils.isBlank(repositoryType) ? config.getRestoreRepositoryType().toLowerCase() : repositoryType;
        if (StringUtils.isBlank(repoType)) {
            logger.info("RepositoryType is empty, hence Defaulting to <s3> type");
            repoType = AbstractRepository.RepositoryType.s3.name();
        }
        if (!repository.doesRepositoryExists(restoreRepositoryName, AbstractRepository.RepositoryType.valueOf(repoType.toLowerCase()))) {
            //If repository does not exist, create new one
            repository.createRestoreRepository(restoreRepositoryName, sourceRepoName);
        }
        // Get Snapshot Name
        String snapshotN = StringUtils.isBlank(snapshotName) ? config.getRestoreSnapshotName() : snapshotName;
        if (StringUtils.isBlank(snapshotN)) {
            //Pick the last Snapshot from the available Snapshots
            List<String> snapshots = ElasticsearchUtils.getAvailableSnapshots(esTransportClient, restoreRepositoryName);
            if (snapshots.isEmpty())
                throw new RestoreBackupException("No available snapshots in <" + restoreRepositoryName + "> repository.");
            //Sorting Snapshot names in Reverse Order
            Collections.sort(snapshots, Collections.reverseOrder());
            //Use the Last available snapshot
            snapshotN = snapshots.get(0);
        }
        logger.info("Snapshot Name : <" + snapshotN + ">");
        // Get Names of Indices
        String commaSeparatedIndices = StringUtils.isBlank(indices) ? config.getCommaSeparatedIndicesToRestore() : indices;
        // A null index list signals "restore everything" to getRestoreSnapshotResponse
        if (StringUtils.isBlank(commaSeparatedIndices) || commaSeparatedIndices.equalsIgnoreCase(ALL_INDICES_TAG)) {
            commaSeparatedIndices = null;
            logger.info("Restoring all Indices.");
        }
        logger.info("Indices param : <" + commaSeparatedIndices + ">");
        RestoreSnapshotResponse restoreSnapshotResponse = getRestoreSnapshotResponse(esTransportClient,
                commaSeparatedIndices, restoreRepositoryName, snapshotN, renamePattern, renameReplacement);
        logger.info("Restore Status = " + restoreSnapshotResponse.status().toString());
        if (restoreSnapshotResponse.status() == RestStatus.OK) {
            printRestoreDetails(restoreSnapshotResponse);
        } else if (restoreSnapshotResponse.status() == RestStatus.INTERNAL_SERVER_ERROR)
            logger.info("Restore Completely Failed");
    }
    //TODO: Map to Java Class and Create JSON
    // Logs a human-readable summary of a completed restore (indices and shard counts).
    public void printRestoreDetails(RestoreSnapshotResponse restoreSnapshotResponse) {
        StringBuilder builder = new StringBuilder();
        builder.append("Restore Details:");
        builder.append("\n\t Name = " + restoreSnapshotResponse.getRestoreInfo().name());
        builder.append("\n\t Indices : ");
        for (String index : restoreSnapshotResponse.getRestoreInfo().indices()) {
            builder.append("\n\t\t Index = " + index);
        }
        builder.append("\n\t Total Shards = " + restoreSnapshotResponse.getRestoreInfo().totalShards());
        builder.append("\n\t Successful Shards = " + restoreSnapshotResponse.getRestoreInfo().successfulShards());
        builder.append("\n\t Total Failed Shards = " + restoreSnapshotResponse.getRestoreInfo().failedShards());
        logger.info(builder.toString());
    }
    // Restore is on-demand: a SimpleTimer with no interval means the task is not
    // scheduled periodically.
    public static TaskTimer getTimer(IConfiguration config) {
        return new SimpleTimer(JOBNAME);
    }
    @Override
    public String getName() {
        return JOBNAME;
    }
    /**
     * Builds and executes the restore request. Blocks until the restore finishes
     * (waitForCompletion is always true). A null/"_all" index list omits setIndices
     * so all indices are restored.
     */
    public RestoreSnapshotResponse getRestoreSnapshotResponse(Client esTransportClient, String commaSeparatedIndices,
                                                              String restoreRepositoryName, String snapshotN,
                                                              String renamePattern, String renameReplacement) {
        RestoreSnapshotRequestBuilder restoreSnapshotRequestBuilder;
        if (commaSeparatedIndices != null && !commaSeparatedIndices.equalsIgnoreCase(ALL_INDICES_TAG)) {
            //This is a blocking call. It'll wait until Restore is finished.
            restoreSnapshotRequestBuilder = esTransportClient.admin().cluster().prepareRestoreSnapshot(restoreRepositoryName, snapshotN)
                    .setWaitForCompletion(true)
                    .setIndices(commaSeparatedIndices.split(COMMA_SEPARATOR)); //"test-idx-*", "-test-idx-2"
        } else {
            // Not Setting Indices explicitly -- Seems to be a bug in Elasticsearch
            restoreSnapshotRequestBuilder = esTransportClient.admin().cluster().prepareRestoreSnapshot(restoreRepositoryName, snapshotN)
                    .setWaitForCompletion(true);
        }
        // NOTE(review): the "||" allows applying a rename when only ONE of
        // pattern/replacement is non-empty — presumably both should be required
        // ("&&"); confirm intent before changing.
        if ((renamePattern != null && renameReplacement != null) && (!renamePattern.isEmpty() || !renameReplacement.isEmpty())) {
            logger.info("Rename Pattern = {}, Rename Replacement = {}", renamePattern, renameReplacement);
            restoreSnapshotRequestBuilder.setRenamePattern(renamePattern).setRenameReplacement(renameReplacement);
        }
        return restoreSnapshotRequestBuilder.execute().actionGet();
    }
}
| 5,576 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/S3RepositorySettingsParams.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.backup.exception.CreateRepositoryException;
import com.netflix.raigad.backup.exception.RestoreBackupException;
import com.netflix.raigad.configuration.IConfiguration;
import org.apache.commons.lang.StringUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
/**
 * Computes the S3 repository settings (bucket, region, base_path) used when
 * creating Elasticsearch snapshot and restore repositories.
 * <p>
 * Example resulting repository settings:
 * {@code {"bucket": "...", "base_path": "es_cluster/20140410", "region": "us-east-1"}}
 */
@Singleton
public class S3RepositorySettingsParams extends AbstractRepositorySettingsParams {
    private static final Logger logger = LoggerFactory.getLogger(S3RepositorySettingsParams.class);

    // Constants, not per-instance state: separator for base_path segments and the
    // daily repository-name date pattern (e.g. "20140410").
    private static final char PATH_SEP = File.separatorChar;
    private static final String S3_REPO_DATE_FORMAT = "yyyyMMdd";

    @Inject
    public S3RepositorySettingsParams(IConfiguration config) {
        super(config);
    }

    /**
     * Populates bucket/region/base_path for a snapshot (backup) repository from
     * configuration.
     *
     * @throws CreateRepositoryException if no backup location is configured
     */
    @Override
    public void setBackupParams() throws CreateRepositoryException {
        this.bucket = config.getBackupLocation();
        if (StringUtils.isEmpty(this.bucket)) {
            throw new CreateRepositoryException("Backup Location is not set in configuration.");
        }
        this.region = config.getDC();
        this.base_path = getSnapshotBackupBasePath();
        logger.info("Bucket : <{}> Region : <{}> Base_path : <{}>", bucket, region, base_path);
    }

    /**
     * Populates bucket/region/base_path for a restore repository. Restore-specific
    * configuration wins; otherwise values fall back to the backup location and
     * the local data center.
     *
     * @param basePathSuffix suffix (typically the source repository name) appended
     *                       to the source cluster name to form base_path
     * @throws RestoreBackupException if no restore source cluster is configured
     */
    @Override
    public void setRestoreParams(String basePathSuffix) throws RestoreBackupException {
        if (StringUtils.isNotBlank(config.getRestoreLocation())) {
            this.bucket = config.getRestoreLocation();
        } else {
            logger.info("config.getRestoreLocation() is Blank, hence setting bucket = config.getBackupLocation()");
            this.bucket = config.getBackupLocation();
        }
        if (StringUtils.isNotBlank(config.getRestoreSourceRepositoryRegion())) {
            this.region = config.getRestoreSourceRepositoryRegion();
        } else {
            logger.info("config.getRestoreSourceRepositoryRegion() is Blank, hence setting region = config.getDC()");
            this.region = config.getDC();
        }
        this.base_path = getRestoreBackupBasePath(basePathSuffix);
        logger.info("Bucket : <{}> Region : <{}> Base_path : <{}>", bucket, region, base_path);
    }

    /**
     * Snapshot base path: "{current_cluster_name}/{yyyyMMdd}",
     * e.g. "es_abc/20140410".
     */
    public String getSnapshotBackupBasePath() {
        StringBuilder basePath = new StringBuilder();
        basePath.append(config.getAppName());
        basePath.append(PATH_SEP);
        basePath.append(getS3RepositoryName());
        logger.info("S3 Repository Snapshot Base Path : <{}>", basePath);
        return basePath.toString();
    }

    /**
     * Restore base path: "{source_cluster_name}/{basePathSuffix}",
     * e.g. "es_abc/20140410".
     *
     * @throws RestoreBackupException if the restore source cluster name is blank
     */
    public String getRestoreBackupBasePath(String basePathSuffix) throws RestoreBackupException {
        if (StringUtils.isBlank(config.getRestoreSourceClusterName())) {
            throw new RestoreBackupException("No Source Cluster for Restore yet chosen.");
        }
        StringBuilder basePath = new StringBuilder();
        basePath.append(config.getRestoreSourceClusterName());
        basePath.append(PATH_SEP);
        basePath.append(basePathSuffix);
        logger.info("S3 Repository Restore Base Path : <{}>", basePath);
        return basePath.toString();
    }

    /** Repository name for today, as a UTC date in yyyyMMdd form. */
    public String getS3RepositoryName() {
        return formatDate(new DateTime(DateTimeZone.UTC), S3_REPO_DATE_FORMAT);
    }

    /** Formats the given Joda DateTime with the supplied pattern. */
    public String formatDate(DateTime dateTime, String dateFormat) {
        DateTimeFormatter fmt = DateTimeFormat.forPattern(dateFormat);
        return dateTime.toString(fmt);
    }
}
| 5,577 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/AbstractRepositorySettingsParams.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup;
import com.google.inject.ImplementedBy;
import com.netflix.raigad.backup.exception.CreateRepositoryException;
import com.netflix.raigad.backup.exception.RestoreBackupException;
import com.netflix.raigad.configuration.IConfiguration;
@ImplementedBy(S3RepositorySettingsParams.class)
public abstract class AbstractRepositorySettingsParams
{
/**
* 0.0.0.0:9200/_snapshot/20140410
* { "type": "s3",
* "settings": { "bucket": "us-east-1.netflix-cassandra-archive-test",
* "base_path": "es_abc/20140410",
* "region": "us-east-1"
* }
* }
*/
protected String bucket;
protected String base_path;
protected String region;
protected final IConfiguration config;
public AbstractRepositorySettingsParams(IConfiguration config)
{
this.config = config;
}
public abstract void setBackupParams() throws CreateRepositoryException;
public abstract void setRestoreParams(String basePathSuffix) throws RestoreBackupException;
public String getBucket() {
return bucket;
}
public String getBase_path() {
return base_path;
}
public String getRegion() {
return region;
}
}
| 5,578 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/MultipleMasterNodesException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup.exception;
/**
 * Signals that more than one master node was detected where a single
 * active master was expected.
 */
public class MultipleMasterNodesException extends Exception {
    private static final long serialVersionUID = 1L;

    public MultipleMasterNodesException(String msg) {
        super(msg);
    }

    public MultipleMasterNodesException(String msg, Throwable th) {
        super(msg, th);
    }

    public MultipleMasterNodesException(Exception ex) {
        super(ex);
    }

    public MultipleMasterNodesException(Throwable th) {
        super(th);
    }
}
| 5,579 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/DuplicateRepositoryNameException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup.exception;
/**
 * Signals an attempt to create a snapshot repository whose name already exists.
 */
public class DuplicateRepositoryNameException extends Exception {
    private static final long serialVersionUID = 1L;

    public DuplicateRepositoryNameException(String msg) {
        super(msg);
    }

    public DuplicateRepositoryNameException(String msg, Throwable th) {
        super(msg, th);
    }

    public DuplicateRepositoryNameException(Exception ex) {
        super(ex);
    }

    public DuplicateRepositoryNameException(Throwable th) {
        super(th);
    }
}
| 5,580 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/CreateRepositoryException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup.exception;
/**
 * Signals a failure while creating a snapshot repository (e.g. missing
 * configuration or an Elasticsearch error).
 */
public class CreateRepositoryException extends Exception {
    private static final long serialVersionUID = 1L;

    public CreateRepositoryException(String msg) {
        super(msg);
    }

    public CreateRepositoryException(String msg, Throwable th) {
        super(msg, th);
    }

    public CreateRepositoryException(Exception ex) {
        super(ex);
    }

    public CreateRepositoryException(Throwable th) {
        super(th);
    }
}
| 5,581 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/NoRepositoryException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup.exception;
/**
 * Signals that an expected snapshot repository could not be found.
 */
public class NoRepositoryException extends Exception {
    private static final long serialVersionUID = 1L;

    public NoRepositoryException(String msg) {
        super(msg);
    }

    public NoRepositoryException(String msg, Throwable th) {
        super(msg, th);
    }

    public NoRepositoryException(Exception ex) {
        super(ex);
    }

    public NoRepositoryException(Throwable th) {
        super(th);
    }
}
| 5,582 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/NoMasterNodeException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup.exception;
/**
 * Signals that no active master node could be located in the cluster.
 */
public class NoMasterNodeException extends Exception {
    private static final long serialVersionUID = 1L;

    public NoMasterNodeException(String msg) {
        super(msg);
    }

    public NoMasterNodeException(String msg, Throwable th) {
        super(msg, th);
    }

    public NoMasterNodeException(Exception ex) {
        super(ex);
    }

    public NoMasterNodeException(Throwable th) {
        super(th);
    }
}
| 5,583 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/RestoreBackupException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup.exception;
/**
 * Signals a failure during a snapshot restore operation (e.g. missing
 * repository name, no available snapshots).
 */
public class RestoreBackupException extends Exception {
    private static final long serialVersionUID = 1L;

    public RestoreBackupException(String msg) {
        super(msg);
    }

    public RestoreBackupException(String msg, Throwable th) {
        super(msg, th);
    }

    public RestoreBackupException(Exception ex) {
        super(ex);
    }

    public RestoreBackupException(Throwable th) {
        super(th);
    }
}
| 5,584 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/SnapshotBackupException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.backup.exception;
/**
 * Signals a failure during a snapshot (backup) operation.
 */
public class SnapshotBackupException extends Exception {
    private static final long serialVersionUID = 1L;

    public SnapshotBackupException(String msg) {
        super(msg);
    }

    public SnapshotBackupException(String msg, Throwable th) {
        super(msg, th);
    }

    public SnapshotBackupException(Exception ex) {
        super(ex);
    }

    public SnapshotBackupException(Throwable th) {
        super(th);
    }
}
| 5,585 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/IIndexNameFilter.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.indexmanagement;
/**
 * Predicate used by index management to decide whether an index name matches
 * a configured naming pattern.
 */
public interface IIndexNameFilter {
    /**
     * @param name the Elasticsearch index name to test
     * @return true if the name matches this filter
     */
    boolean filter(String name);
}
| 5,586 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/IndexMetadata.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.indexmanagement;
import com.netflix.raigad.indexmanagement.exception.UnsupportedAutoIndexException;
import com.netflix.raigad.indexmanagement.indexfilters.DatePatternIndexNameFilter;
import org.codehaus.jackson.annotate.JsonCreator;
import org.codehaus.jackson.annotate.JsonProperty;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISOPeriodFormat;
/**
 * Immutable description of one auto-managed index family: its date-based name
 * pattern, retention period, and whether future indices should be pre-created.
 * Deserialized from the JSON index-metadata configuration.
 */
public class IndexMetadata {
    // Candidate increments (smallest first) used to find the next distinct index
    // name when pre-creating; made final so the shared array cannot be reassigned.
    private static final Period[] AMOUNTS = new Period[] {
            Period.minutes(1),
            Period.hours(1),
            Period.days(1),
            Period.weeks(1),
            Period.months(1),
            Period.years(1)
    };

    /** Retention granularity: maps to a Joda date pattern and an ISO period template. */
    public enum RETENTION_TYPE {
        HOURLY("YYYYMMddHH", "PT%dH"),
        DAILY("YYYYMMdd", "P%dD"),
        MONTHLY("YYYYMM", "P%dM"),
        YEARLY("YYYY", "P%dY");

        public final String datePattern;
        public final String periodFormat;

        RETENTION_TYPE(String datePattern, String periodFormat) {
            this.datePattern = datePattern;
            this.periodFormat = periodFormat;
        }
    }

    private final String indexNamePattern;
    private final DateTimeFormatter formatter;
    private final Period retentionPeriod;
    private final IIndexNameFilter indexNameFilter;
    private final boolean preCreate;

    /**
     * @param indexName        legacy plain-text prefix; quoted and combined with the
     *                         retention type's date pattern when indexNamePattern is absent
     * @param indexNamePattern full Joda date pattern for index names (takes precedence)
     * @param retentionType    HOURLY/DAILY/MONTHLY/YEARLY (case-insensitive); defaults to DAILY
     * @param retentionPeriod  either an ISO-8601 period ("P2Y") or a bare count
     *                         interpreted in units of the retention type
     * @param preCreate        whether to pre-create the next index; null means false
     */
    @JsonCreator
    public IndexMetadata(
            @JsonProperty("indexName") String indexName,
            @JsonProperty("indexNamePattern") String indexNamePattern,
            @JsonProperty("retentionType") String retentionType,
            @JsonProperty("retentionPeriod") String retentionPeriod,
            @JsonProperty("preCreate") Boolean preCreate) throws UnsupportedAutoIndexException {
        if (retentionType == null) {
            retentionType = "DAILY";
        }
        RETENTION_TYPE retType = RETENTION_TYPE.valueOf(retentionType.toUpperCase());
        // If legacy prefix is used, then quote it so it will be used as plain text in
        // date pattern
        String prefix = (indexName == null) ? "" : "'" + indexName + "'";
        String namePattern = (indexNamePattern == null)
                ? prefix + retType.datePattern
                : indexNamePattern;
        this.indexNamePattern = (indexName == null && indexNamePattern == null)
                ? null
                : namePattern;
        this.formatter = DateTimeFormat.forPattern(namePattern).withZoneUTC();
        this.indexNameFilter = new DatePatternIndexNameFilter(formatter);
        if (retentionPeriod == null) {
            this.retentionPeriod = null;
        } else if (retentionPeriod.startsWith("P")) {
            this.retentionPeriod = ISOPeriodFormat.standard().parsePeriod(retentionPeriod);
        } else {
            // Bare number: expand using the retention type's template, e.g. 4 + DAILY -> "P4D"
            int num = Integer.parseInt(retentionPeriod);
            String period = String.format(retType.periodFormat, num);
            this.retentionPeriod = ISOPeriodFormat.standard().parsePeriod(period);
        }
        // Null-safe unboxing: null means "do not pre-create"
        this.preCreate = Boolean.TRUE.equals(preCreate);
    }

    @Override
    public String toString() {
        return String.format("{\"indexNamePattern\": \"%s\", \"retentionPeriod\": \"%s\", \"preCreate\": %b}",
                indexNamePattern, retentionPeriod, preCreate);
    }

    public String getIndexNamePattern() {
        return indexNamePattern;
    }

    public Period getRetentionPeriod() {
        return retentionPeriod;
    }

    public IIndexNameFilter getIndexNameFilter() {
        return indexNameFilter;
    }

    public boolean isPreCreate() {
        return preCreate;
    }

    /** True only if both a name pattern and a retention period are configured. */
    public boolean isActionable() {
        return indexNamePattern != null && retentionPeriod != null;
    }

    /**
     * Cutoff before which indices should be deleted, floored to the precision of
     * the index name pattern.
     */
    public DateTime getPastRetentionCutoffDate(DateTime currentDateTime) {
        // After computing the cutoff we print then reparse the cutoff time to round to
        // the significant aspects of the time based on the formatter. For example:
        //
        //   currentDateTime = 2018-02-03T23:47
        //   retentionPeriod = P2Y
        //   cutoff          = 2016-02-03T23:47
        //
        // If the index pattern is yyyy, then a 2016 index would be before the cutoff so it
        // would get dropped. We want to floor the cutoff time to only the significant aspects
        // which for this example would be the year.
        DateTime cutoff = currentDateTime.minus(retentionPeriod);
        return formatter.parseDateTime(formatter.print(cutoff));
    }

    /** Parses the timestamp encoded in an index name using this pattern. */
    public DateTime getDateForIndexName(String name) {
        return formatter.parseDateTime(name);
    }

    /**
     * Name of the next index to pre-create: the smallest time increment that
     * yields a name different from the current one.
     *
     * @throws UnsupportedAutoIndexException if even a one-year step does not change the name
     */
    public String getIndexNameToPreCreate(DateTime currentDateTime) throws UnsupportedAutoIndexException {
        String currentIndexName = formatter.print(currentDateTime);
        for (Period amount : AMOUNTS) {
            String newIndexName = formatter.print(currentDateTime.plus(amount));
            if (!currentIndexName.equals(newIndexName)) {
                return newIndexName;
            }
        }
        throw new UnsupportedAutoIndexException("Invalid date pattern, do not know how to pre create");
    }
}
| 5,587 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/ElasticsearchIndexManager.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.indexmanagement;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.indexmanagement.exception.UnsupportedAutoIndexException;
import com.netflix.raigad.scheduler.CronTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.*;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.client.Client;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Index retention will delete indices older than certain date e.g. if the current date is 10/28/2014,
* retention period is 4, and given the following indices:
* <p>
* test_index20141024
* test_index20141025
* test_index20141026
* test_index20141027
* test_index20141028
* <p>
* Index to be deleted is test_index20141024.
* <p>
* If pre-create option is enabled, then one future index will be pre-created. Using the input data from above,
* the following index will be pre-created: test_index20141029
*/
@Singleton
public class ElasticsearchIndexManager extends Task {
private static final Logger logger = LoggerFactory.getLogger(ElasticsearchIndexManager.class);
public static String JOB_NAME = "ElasticsearchIndexManager";
private final HttpModule httpModule;
    // Guice-injected constructor; httpModule is used for master-node detection.
    @Inject
    protected ElasticsearchIndexManager(IConfiguration config, HttpModule httpModule) {
        super(config);
        this.httpModule = httpModule;
    }
    // Package-private seam (overridable in tests) for obtaining the shared transport client.
    Client getTransportClient() throws ElasticsearchTransportClientConnectionException {
        return ElasticsearchTransportClient.instance(config).getTransportClient();
    }
@Override
public void execute() {
try {
if (!config.isIndexAutoCreationEnabled()) {
logger.info("Index management is disabled");
return;
}
// Check is Elasticsearch is started
if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
logger.info("Elasticsearch is not yet started, skipping index management");
return;
}
// Only active master can perform index management
if (!ElasticsearchUtils.amIMasterNode(config, httpModule)) {
if (config.isDebugEnabled()) {
logger.debug("Cannot perform index management: current node is not an active master node");
}
return;
}
runIndexManagement();
} catch (Exception e) {
logger.warn("Exception while performing index management", e);
}
}
public void runIndexManagement() throws Exception {
logger.info("Starting index management");
String serializedIndexMetadata = config.getIndexMetadata();
List<IndexMetadata> indexMetadataList;
try {
indexMetadataList = IndexUtils.parseIndexMetadata(serializedIndexMetadata);
} catch (Exception e) {
logger.error(String.format("Failed to build index metadata from %s", serializedIndexMetadata), e);
return;
}
Client esTransportClient = getTransportClient();
DateTime dateTime = new DateTime();
runIndexManagement(esTransportClient, indexMetadataList, dateTime);
}
void runIndexManagement(Client esTransportClient, List<IndexMetadata> indexMetadataList, DateTime dateTime) {
// Find all the indices
IndicesStatsResponse indicesStatsResponse = getIndicesStatsResponse(esTransportClient);
Map<String, IndexStats> indexStatsMap = indicesStatsResponse.getIndices();
if (indexStatsMap == null || indexStatsMap.isEmpty()) {
logger.info("Cluster is empty, no indices found");
return;
}
for (IndexMetadata indexMetadata : indexMetadataList) {
if (!indexMetadata.isActionable()) {
logger.warn(String.format("Index metadata %s is not actionable, skipping", indexMetadata));
continue;
}
try {
checkIndexRetention(esTransportClient, indexStatsMap.keySet(), indexMetadata, dateTime);
if (indexMetadata.isPreCreate()) {
preCreateIndex(esTransportClient, indexMetadata, dateTime);
}
} catch (Exception e) {
logger.error("Caught an exception while building index metadata information from configuration property", e);
return;
}
}
}
@Override
public String getName() {
return JOB_NAME;
}
public static TaskTimer getTimer(IConfiguration config) {
return new CronTimer(config.getAutoCreateIndexScheduleMinutes(), 0, JOB_NAME);
}
void checkIndexRetention(Client esTransportClient, Set<String> indices, IndexMetadata indexMetadata, DateTime dateTime) throws UnsupportedAutoIndexException {
// Calculate the past retention date
DateTime pastRetentionCutoffDate = indexMetadata.getPastRetentionCutoffDate(dateTime);
logger.info("Deleting indices that are older than {}", pastRetentionCutoffDate);
indices.forEach(indexName -> {
logger.info("Processing index [{}]", indexName);
if (indexMetadata.getIndexNameFilter().filter(indexName)) {
// Extract date from the index name
DateTime indexDate = indexMetadata.getDateForIndexName(indexName);
if (indexDate.isBefore(pastRetentionCutoffDate)) {
logger.info("Date {} for index {} is past the retention date of {}, deleting it", indexDate, indexName, pastRetentionCutoffDate);
deleteIndices(esTransportClient, indexName, config.getAutoCreateIndexTimeout());
}
}
});
}
void preCreateIndex(Client client, IndexMetadata indexMetadata, DateTime dateTime) throws UnsupportedAutoIndexException {
logger.info("Pre-creating indices for {}*", indexMetadata.getIndexNamePattern());
IndicesStatsResponse indicesStatsResponse = getIndicesStatsResponse(client);
Map<String, IndexStats> indexStatsMap = indicesStatsResponse.getIndices();
if (indexStatsMap == null || indexStatsMap.isEmpty()) {
logger.info("No existing indices, no need to pre-create");
return;
}
indexStatsMap.keySet().stream()
.filter(indexName -> indexMetadata.getIndexNameFilter().filter(indexName))
.findFirst()
.ifPresent(indexName -> {
try {
createIndex(client, indexMetadata.getIndexNameToPreCreate(dateTime));
} catch (UnsupportedAutoIndexException e) {
logger.error("Invalid index metadata: " + indexMetadata.toString(), e);
}
});
}
void createIndex(Client client, String indexName) {
if (!client.admin().indices().prepareExists(indexName).execute().actionGet(config.getAutoCreateIndexTimeout()).isExists()) {
client.admin().indices().prepareCreate(indexName).execute().actionGet(config.getAutoCreateIndexTimeout());
logger.info(indexName + " has been created");
} else {
logger.warn(indexName + " already exists");
}
}
void deleteIndices(Client client, String indexName, int timeout) {
DeleteIndexResponse deleteIndexResponse = client.admin().indices().prepareDelete(indexName).execute().actionGet(timeout);
if (deleteIndexResponse.isAcknowledged()) {
logger.info(indexName + " deleted");
} else {
logger.warn("Failed to delete " + indexName);
throw new RuntimeException("Failed to delete " + indexName);
}
}
/**
* Following method is isolated so that it helps in Unit Testing for Mocking
*
* @param esTransportClient
* @return
*/
IndicesStatsResponse getIndicesStatsResponse(Client esTransportClient) {
return esTransportClient.admin().indices().prepareStats("_all").execute().actionGet(config.getAutoCreateIndexTimeout());
}
}
| 5,588 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/IndexUtils.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.indexmanagement;
import com.netflix.raigad.objectmapper.DefaultIndexMapper;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import java.io.IOException;
import java.util.List;
public class IndexUtils {
    // ObjectMapper is designed to be reused; a single shared instance avoids rebuilding
    // the mapper on every call. Assumes DefaultIndexMapper is fully configured in its
    // constructor (standard Jackson practice) — TODO confirm.
    private static final ObjectMapper JSON_MAPPER = new DefaultIndexMapper();

    private static final TypeReference<List<IndexMetadata>> INDEX_METADATA_LIST_TYPE =
            new TypeReference<List<IndexMetadata>>() {};

    // Utility class: prevent instantiation
    private IndexUtils() {
    }

    /**
     * Convert the JSON String of parameters to IndexMetadata objects
     *
     * @param serializedIndexMetadata : JSON string with parameters
     * @return list of IndexMetadata objects
     * @throws IOException if the input is not valid JSON for a list of IndexMetadata
     */
    public static List<IndexMetadata> parseIndexMetadata(String serializedIndexMetadata) throws IOException {
        return JSON_MAPPER.readValue(serializedIndexMetadata, INDEX_METADATA_LIST_TYPE);
    }
}
| 5,589 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/indexfilters/DatePatternIndexNameFilter.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.indexmanagement.indexfilters;
import com.netflix.raigad.indexmanagement.IIndexNameFilter;
import org.joda.time.MutableDateTime;
import org.joda.time.format.DateTimeFormatter;
/**
 * Accepts index names that are entirely a date under a configured Joda-Time pattern,
 * guarding against the parser's leniency (partial matches, oversized "years", and
 * short forms that only parse because parsing is looser than printing).
 */
public class DatePatternIndexNameFilter implements IIndexNameFilter {
    private final DateTimeFormatter formatter;

    public DatePatternIndexNameFilter(DateTimeFormatter formatter) {
        this.formatter = formatter;
    }

    /**
     * Returns true only when the whole name parses under the pattern, the parsed year is
     * plausible, and printing the parsed instant reproduces the name exactly.
     */
    @Override
    public boolean filter(String name) {
        try {
            MutableDateTime parsed = new MutableDateTime();
            int consumed = formatter.parseInto(parsed, name, 0);

            if (consumed <= 0 || consumed != name.length()) {
                // Parse failed (negative result) or only a prefix of the name matched
                return false;
            }

            return hasPlausibleYear(parsed) && printsBackToSame(name, parsed);
        } catch (IllegalArgumentException e) {
            return false;
        }
    }

    // A pattern like YYYY would happily swallow 201802 as a six-digit "year"; such names
    // are far more likely a separate year+month index, so reject years of five or more
    // digits.
    private boolean hasPlausibleYear(MutableDateTime parsed) {
        return parsed.getYear() < 10000;
    }

    // Parsing is more lenient than printing: YYYYMM accepts both 20131 and 201301.
    // Requiring print(parse(name)) == name rejects the ambiguous short form, so 20131
    // fails while 201301 passes.
    private boolean printsBackToSame(String expected, MutableDateTime parsed) {
        return formatter.print(parsed).equals(expected);
    }
}
| 5,590 |
0 | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement | Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/exception/UnsupportedAutoIndexException.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.indexmanagement.exception;
/**
 * Checked exception signaling that auto-index metadata describes an index naming or
 * scheduling scheme the index manager cannot handle.
 */
public class UnsupportedAutoIndexException extends Exception {
    private static final long serialVersionUID = 1L;

    /** Creates an exception with a detail message and an underlying cause. */
    public UnsupportedAutoIndexException(String msg, Throwable th) {
        super(msg, th);
    }

    /** Creates an exception with a detail message only. */
    public UnsupportedAutoIndexException(String msg) {
        super(msg);
    }

    /** Wraps another exception as the cause. */
    public UnsupportedAutoIndexException(Exception ex) {
        super(ex);
    }

    /** Wraps an arbitrary throwable as the cause. */
    public UnsupportedAutoIndexException(Throwable th) {
        super(th);
    }
}
| 5,591 |
0 | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/RaigadInstance.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.discovery;
import java.io.Serializable;
/**
 * Serializable bean describing a single Raigad-managed Elasticsearch instance as exposed
 * by the Raigad REST API (host, IP, availability zone, DC, ASG, etc.). Populated by
 * {@code ElasticsearchUtil.getRaigadInstancesFromJsonString} and consumed during node
 * discovery.
 */
public class RaigadInstance implements Serializable {
    private static final long serialVersionUID = 5606412386974488659L;

    private String hostname;
    // Last-updated timestamp; presumably epoch millis — TODO confirm against the API
    private long updatetime;
    private boolean outOfService;
    // NOTE(review): non-conventional capitalized field name; renaming it would change
    // this class's Java-serialized form, so it is left as is
    private String Id;
    private String app;
    private String instanceId;
    private String availabilityZone;
    // Public IP address of the instance (exposed via getHostIP/setHostIP)
    private String publicip;
    private String dc;
    private String asgName;

    @Override
    public String toString() {
        return String
                .format("Host name [%s], instance ID [%s], app [%s], AZ [%s], ID [%s], IP [%s], DC [%s], ASG [%s], update time [%s]",
                        getHostName(), getInstanceId(), getApp(),
                        getAvailabilityZone(), getId(), getHostIP(), getDC(), getAsg(), getUpdatetime());
    }

    public String getId() {
        return Id;
    }

    public void setId(String id) {
        this.Id = id;
    }

    public String getApp() {
        return app;
    }

    public void setApp(String app) {
        this.app = app;
    }

    public String getInstanceId() {
        return instanceId;
    }

    public void setInstanceId(String instanceId) {
        this.instanceId = instanceId;
    }

    public String getAvailabilityZone() {
        return availabilityZone;
    }

    public void setAvailabilityZone(String availabilityZone) {
        this.availabilityZone = availabilityZone;
    }

    public String getHostName() {
        return hostname;
    }

    public String getHostIP() {
        return publicip;
    }

    public void setHostName(String hostname) {
        this.hostname = hostname;
    }

    public void setHostIP(String publicip) {
        this.publicip = publicip;
    }

    public String getDC() {
        return dc;
    }

    public void setDC(String dc) {
        this.dc = dc;
    }

    public String getAsg() {
        return asgName;
    }

    public void setAsg(String asgName) {
        this.asgName = asgName;
    }

    public long getUpdatetime() {
        return updatetime;
    }

    public void setUpdatetime(long updatetime) {
        this.updatetime = updatetime;
    }

    public boolean isOutOfService() {
        return outOfService;
    }

    public void setOutOfService(boolean outOfService) {
        this.outOfService = outOfService;
    }
}
| 5,592 |
0 | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/RaigadDiscoveryPlugin.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.discovery;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.TransportService;
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;
/**
 * Elasticsearch discovery plugin that registers the "raigad" unicast hosts provider,
 * which resolves cluster members by querying the co-located Raigad sidecar.
 */
public class RaigadDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
    private static final Logger logger = Loggers.getLogger(RaigadDiscoveryPlugin.class);

    private final Settings settings;

    public RaigadDiscoveryPlugin(Settings settings) {
        this.settings = settings;
        logger.info("Starting Raigad discovery");
    }

    /** Exposes the Raigad-backed hosts provider under the key "raigad". */
    @Override
    public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(
            TransportService transportService, NetworkService networkService) {
        Supplier<UnicastHostsProvider> provider =
                () -> new RaigadUnicastHostsProvider(settings, transportService);
        return Collections.singletonMap("raigad", provider);
    }
}
| 5,593 |
0 | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/RaigadUnicastHostsProvider.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.discovery;
import com.netflix.raigad.discovery.utils.DataFetcher;
import com.netflix.raigad.discovery.utils.ElasticsearchUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.List;
/**
 * Unicast hosts provider that asks the local Raigad sidecar's REST API for the current
 * cluster (or tribe) membership and converts each instance into a {@link DiscoveryNode}.
 */
public class RaigadUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
    /** Local Raigad endpoint listing the nodes of a regular (island) cluster. */
    private static final String GET_NODES_ISLAND_URL = "http://127.0.0.1:8080/Raigad/REST/v1/esconfig/get_nodes";

    /** Local Raigad endpoint prefix listing the nodes of a specific tribe. */
    private static final String GET_NODES_TRIBE_URL_PREFIX = "http://127.0.0.1:8080/Raigad/REST/v1/esconfig/get_tribe_nodes/";

    private final String nodeName;
    private final TransportService transportService;

    RaigadUnicastHostsProvider(Settings settings, TransportService transportService) {
        super(settings);
        this.transportService = transportService;
        nodeName = settings.get("node.name");
        logger.info("[raigad-discovery] Node name [{}]", nodeName);
    }

    /**
     * Queries Raigad (tribe endpoint when the node name indicates a tribe node, island
     * endpoint otherwise) and returns a discovery node per reachable instance. Fails the
     * whole lookup with a RuntimeException when Raigad itself cannot be reached; a single
     * unresolvable instance is only logged and skipped.
     */
    @Override
    public List<DiscoveryNode> buildDynamicNodes() {
        final List<DiscoveryNode> discoveryNodes = new ArrayList<>();

        try {
            // Extract tribe ID from name field of settings and query accordingly
            String discoveryNodesJsonString;

            if (isTribeNode()) {
                String tribeId = nodeName.substring(nodeName.indexOf("/") + 1);
                logger.debug("[raigad-discovery] Tribe ID detected [{}]", tribeId);
                discoveryNodesJsonString = DataFetcher.fetchData(GET_NODES_TRIBE_URL_PREFIX + tribeId, logger);
            } else {
                discoveryNodesJsonString = DataFetcher.fetchData(GET_NODES_ISLAND_URL, logger);
            }

            List<RaigadInstance> instances = ElasticsearchUtil.getRaigadInstancesFromJsonString(discoveryNodesJsonString, logger);

            for (RaigadInstance instance : instances) {
                try {
                    TransportAddress[] addresses = transportService.addressesFromString(instance.getHostIP(), 1);

                    if (addresses != null && addresses.length > 0) {
                        logger.info("[raigad-discovery] Adding instance [{}], address [{}], transport address [{}]",
                                instance.getId(), instance.getHostIP(), addresses[0]);
                        discoveryNodes.add(new DiscoveryNode(instance.getId(), addresses[0], Version.CURRENT.minimumCompatibilityVersion()));
                    }
                } catch (Exception e) {
                    // Bug fix: the Throwable must be the LAST argument to be logged as an
                    // exception; previously it was consumed by the first "{}" placeholder
                    // and the host IP argument was silently dropped.
                    logger.warn("[raigad-discovery] Failed to add instance [{}], address [{}]",
                            instance.getId(), instance.getHostIP(), e);
                }
            }
        } catch (Exception e) {
            logger.error("[raigad-discovery] Exception while trying to build dynamic discovery nodes", e);
            throw new RuntimeException(e);
        }

        logger.debug("[raigad-discovery] Using dynamic discovery nodes {}", discoveryNodes);
        return discoveryNodes;
    }

    // NOTE(review): "/t" is a loose heuristic for tribe node names (e.g. "<name>/t<id>");
    // any node name containing "/t" matches — confirm the upstream naming convention.
    private boolean isTribeNode() {
        if (nodeName == null || nodeName.isEmpty()) {
            return false;
        }

        return nodeName.contains("/t");
    }
}
| 5,594 |
0 | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/utils/ElasticsearchUtil.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.discovery.utils;
import com.netflix.raigad.discovery.RaigadInstance;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class ElasticsearchUtil {
    private static final String TOP_LEVEL_ELEMENT = "instances";
    private static final String ID = "id";
    private static final String APP_NAME = "app_name";
    private static final String HOST_NAME = "host_name";
    private static final String INSTANCE_ID = "instance_id";
    private static final String AVAILABILITY_ZONE = "availability_zone";
    private static final String PUBLIC_IP = "public_ip";
    private static final String DC = "dc";
    private static final String UPDATE_TIME = "update_time";

    // Utility class: prevent instantiation
    private ElasticsearchUtil() {
    }

    /**
     * Parses the JSON document returned by Raigad's node-listing endpoint (an "instances"
     * object keyed by instance) into {@link RaigadInstance} beans. On a JSON parse error
     * the exception is logged and whatever was collected so far (possibly an empty list)
     * is returned.
     */
    @SuppressWarnings("unchecked")
    public static List<RaigadInstance> getRaigadInstancesFromJsonString(String jsonInstances, Logger logger) {
        List<RaigadInstance> raigadInstances = new ArrayList<>();

        try {
            Map<String, Object> topLevelInstanceMap = (Map<String, Object>) jsonToMap(jsonInstances).get(TOP_LEVEL_ELEMENT);

            // Iterate over entries directly instead of keySet() followed by get() lookups
            for (Map.Entry<String, Object> instanceEntry : topLevelInstanceMap.entrySet()) {
                Map<String, Object> instParamMap = (Map<String, Object>) instanceEntry.getValue();

                RaigadInstance raigadInstance = new RaigadInstance();
                raigadInstance.setApp((String) instParamMap.get(APP_NAME));
                raigadInstance.setAvailabilityZone((String) instParamMap.get(AVAILABILITY_ZONE));
                raigadInstance.setDC((String) instParamMap.get(DC));
                raigadInstance.setHostIP((String) instParamMap.get(PUBLIC_IP));
                raigadInstance.setHostName((String) instParamMap.get(HOST_NAME));
                raigadInstance.setId((String) instParamMap.get(ID));
                raigadInstance.setInstanceId((String) instParamMap.get(INSTANCE_ID));
                // NOTE(review): unboxing NPEs if "update_time" is absent — confirm the API
                // always includes it
                raigadInstance.setUpdatetime((Long) instParamMap.get(UPDATE_TIME));

                logger.info("Raigad instance: {}", raigadInstance.toString());

                //Add to the list
                raigadInstances.add(raigadInstance);
            }
        } catch (IOException e) {
            logger.error("Error caught while parsing JSON", e);
        }

        return raigadInstances;
    }

    /** Parses a JSON string into an insertion-ordered map using Elasticsearch's parser. */
    private static Map<String, Object> jsonToMap(String jsonString) throws IOException {
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, jsonString)) {
            return parser.mapOrdered();
        }
    }
}
| 5,595 |
0 | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery | Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/utils/DataFetcher.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.discovery.utils;
import org.apache.logging.log4j.Logger;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.FilterInputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
public class DataFetcher {
public static String fetchData(String url, Logger logger) {
HttpURLConnection httpConnection = null;
DataInputStream responseStream = null;
try {
httpConnection = (HttpURLConnection) new URL(url).openConnection();
httpConnection.setConnectTimeout(1000);
httpConnection.setReadTimeout(10000);
httpConnection.setRequestMethod("GET");
if (httpConnection.getResponseCode() != 200) {
logger.error("Unable to get data from URL [" + url + "]");
throw new RuntimeException("Unable to fetch data from Raigad API");
}
byte[] bytes = new byte[2048];
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
responseStream = new DataInputStream((FilterInputStream) httpConnection.getContent());
int bytesRead;
while ((bytesRead = responseStream.read(bytes, 0, bytes.length)) != -1) {
byteArrayOutputStream.write(bytes, 0, bytesRead);
}
String result = new String(byteArrayOutputStream.toByteArray(), StandardCharsets.UTF_8);
logger.info("Raigad ({}) returned {}", url, result);
return result;
} catch (Exception ex) {
throw new RuntimeException(ex);
} finally {
try {
if (responseStream != null) {
responseStream.close();
}
} catch (Exception e) {
logger.warn("Failed to close response stream from Raigad", e);
}
if (httpConnection != null)
httpConnection.disconnect();
}
}
}
| 5,596 |
0 | Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein | Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/SchedulerTest.java | /**
* Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
* information.
*/
package com.airbnb.dynein.scheduler;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import com.airbnb.conveyor.async.AsyncSqsClient;
import com.airbnb.dynein.api.DyneinJobSpec;
import com.airbnb.dynein.api.JobSchedulePolicy;
import com.airbnb.dynein.api.JobScheduleType;
import com.airbnb.dynein.common.job.JacksonJobSpecTransformer;
import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.JacksonTokenManager;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.metrics.NoOpMetricsImpl;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class SchedulerTest {
@Mock private AsyncSqsClient asyncClient;
@Mock private ScheduleManager scheduleManager;
private Scheduler scheduler;
private JobSpecTransformer jobSpecTransformer;
@Before
public void setUp() {
ObjectMapper mapper = new ObjectMapper();
jobSpecTransformer = new JacksonJobSpecTransformer(mapper);
TokenManager tokenManager = new JacksonTokenManager(mapper);
scheduler =
new Scheduler(
asyncClient,
"inbound-test",
jobSpecTransformer,
tokenManager,
scheduleManager,
Clock.fixed(Instant.now(), ZoneId.of("UTC")),
new NoOpMetricsImpl());
}
@Test
public void testScheduledJob() {
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("test-queue")
.schedulePolicy(
JobSchedulePolicy.builder()
.type(JobScheduleType.SCHEDULED)
.delayMillis(1000L)
.build())
.build();
when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test"))
.thenReturn(CompletableFuture.completedFuture(null));
CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test");
assertNull(ret.join());
}
@Test
public void testImmediateJob() {
when(asyncClient.add(any(String.class), eq("test-queue")))
.thenReturn(CompletableFuture.completedFuture(null));
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("test-queue")
.schedulePolicy(
JobSchedulePolicy.builder().type(JobScheduleType.IMMEDIATE).delayMillis(0L).build())
.build();
CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-queue");
assertNull(ret.join());
}
@Test
public void testSQSDelayedJob_withInstant() {
when(asyncClient.add(any(String.class), eq("test-queue"), any(Integer.class)))
.thenReturn(CompletableFuture.completedFuture(null));
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("test-queue")
.schedulePolicy(
JobSchedulePolicy.builder()
.type(JobScheduleType.SQS_DELAYED)
.delayMillis(5000L)
.build())
.build();
CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-queue", 5);
assertNull(ret.join());
}
@Test
public void testSQSDelayedJob_withDelay() {
when(asyncClient.add(any(String.class), eq("test-queue"), any(Integer.class)))
.thenReturn(CompletableFuture.completedFuture(null));
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("test-queue")
.schedulePolicy(
JobSchedulePolicy.builder()
.type(JobScheduleType.SQS_DELAYED)
.epochMillis(Instant.now().plusMillis(5000L).toEpochMilli())
.build())
.build();
CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-queue", 5);
assertNull(ret.join());
}
@Test(expected = IllegalArgumentException.class)
public void testSQSDelayedJob_DelayTooLong() throws Throwable {
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("error-queue")
.schedulePolicy(
JobSchedulePolicy.builder()
.type(JobScheduleType.SQS_DELAYED)
.epochMillis(Instant.now().plusMillis(1000000L).toEpochMilli())
.build())
.build();
try {
scheduler.createJob(jobSpec).join();
} catch (Exception e) {
Throwable t = e;
while (t.getCause() != null) {
t = t.getCause();
}
throw t;
}
}
@Test
public void testScheduledJobFailure() {
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("error-queue")
.schedulePolicy(
JobSchedulePolicy.builder()
.type(JobScheduleType.SCHEDULED)
.delayMillis(1000L)
.build())
.build();
CompletableFuture<Void> error = new CompletableFuture<>();
error.completeExceptionally(new Exception());
when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test"))
.thenReturn(error);
CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test");
try {
ret.get(1000, TimeUnit.MILLISECONDS);
} catch (TimeoutException timeout) {
fail("Future does not seem to complete when failure in adding to inbound queue (SCHEDULED).");
} catch (Exception ex) {
}
}
@Test
public void testImmediateJobFailure() {
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("test-error")
.schedulePolicy(
JobSchedulePolicy.builder().type(JobScheduleType.IMMEDIATE).delayMillis(0L).build())
.build();
CompletableFuture<Void> error = new CompletableFuture<>();
error.completeExceptionally(new Exception());
when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error"))
.thenReturn(error);
CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error");
try {
ret.get(1000, TimeUnit.MILLISECONDS);
} catch (TimeoutException timeout) {
fail(
"Future does not seem to complete when failure in adding to destination queue (IMMEDIATE).");
} catch (Exception ex) {
}
}
@Test
public void testSQSDelayedJobFailure() {
DyneinJobSpec jobSpec =
DyneinJobSpec.builder()
.name("AddJob")
.queueName("test-error")
.schedulePolicy(
JobSchedulePolicy.builder()
.type(JobScheduleType.SQS_DELAYED)
.epochMillis(Instant.now().plusMillis(5000L).toEpochMilli())
.build())
.build();
CompletableFuture<Void> error = new CompletableFuture<>();
error.completeExceptionally(new Exception());
when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error", 5))
.thenReturn(error);
CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error", 5);
try {
ret.get(1000, TimeUnit.MILLISECONDS);
} catch (TimeoutException timeout) {
fail(
"Future does not seem to complete when failure in adding to destination queue (SQS_DELAYED).");
} catch (Exception ex) {
}
}
}
| 5,597 |
0 | Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein | Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/SchedulerManagerTest.java | /**
* Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
* information.
*/
package com.airbnb.dynein.scheduler;
import com.airbnb.dynein.api.*;
import com.airbnb.dynein.common.job.JacksonJobSpecTransformer;
import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.JacksonTokenManager;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.Schedule.JobStatus;
import com.airbnb.dynein.scheduler.metrics.NoOpMetricsImpl;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class SchedulerManagerTest {
  private static final byte[] SERIALIZED_JOB_DATA = {0, 0, 0, 0};

  private JobSpecTransformer transformer;
  private TokenManager tokenManager;
  private ScheduleManager scheduleManager;
  private Clock clock;

  @Before
  public void setUp() {
    ObjectMapper mapper = new ObjectMapper();
    transformer = new JacksonJobSpecTransformer(mapper);
    tokenManager = new JacksonTokenManager(mapper);
    // Fixed clock so every Instant.now(clock) below yields the same instant,
    // making the generated schedule key deterministic.
    clock = Clock.fixed(Instant.now(), ZoneId.of("UTC"));
    int maxShardId = 64;
    scheduleManager =
        new NoOpScheduleManager(
            maxShardId, tokenManager, transformer, clock, new NoOpMetricsImpl());
  }

  /**
   * Builds a SCHEDULED job spec whose scheduled time is 1000 seconds after the fixed clock's
   * instant, carrying the given token.
   */
  private DyneinJobSpec getTestJobSpec(String token) {
    JobSchedulePolicy policy =
        JobSchedulePolicy.builder()
            .type(JobScheduleType.SCHEDULED)
            .epochMillis(Instant.now(clock).plusSeconds(1000).toEpochMilli())
            .build();
    return DyneinJobSpec.builder()
        .jobToken(token)
        .name("AddJob")
        .queueType("PRODUCTION")
        .queueName("test-queue")
        // Use the injected fixed clock (not the wall clock) so the spec is fully deterministic.
        .createAtInMillis(Instant.now(clock).minusMillis(10).toEpochMilli())
        .schedulePolicy(policy)
        .serializedJob(SERIALIZED_JOB_DATA)
        .build();
  }

  /**
   * This test is to ensure that we always use the scheduled time in the jobSpec to make the {@code
   * Schedule} rather than the one in the token.
   */
  @Test
  public void testMakeSchedule() throws InvalidTokenException {
    String token =
        tokenManager.generateToken(1L, "test-cluster", Instant.now(clock).toEpochMilli());
    String serializedJobSpec = transformer.serializeJobSpec(getTestJobSpec(token));
    Schedule schedule = scheduleManager.makeSchedule(serializedJobSpec);
    // The schedule key must embed the jobSpec's scheduled time (clock + 1000s), not the
    // (different) timestamp baked into the token.
    Assert.assertEquals(
        schedule,
        new Schedule(
            Instant.now(clock).plusSeconds(1000).toEpochMilli() + "#" + token,
            JobStatus.SCHEDULED,
            serializedJobSpec,
            "1"));
  }
}
| 5,598 |
0 | Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein | Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/NoOpScheduleManager.java | /**
* Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
* information.
*/
package com.airbnb.dynein.scheduler;
import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.Schedule.JobStatus;
import com.airbnb.dynein.scheduler.metrics.Metrics;
import java.time.Clock;
import java.time.Instant;
import java.util.concurrent.CompletableFuture;
/**
 * A {@link ScheduleManager} stub whose storage operations all succeed immediately and do nothing.
 * Intended for tests that exercise the schedule-key/serialization logic inherited from the base
 * class without touching a real backing store.
 */
public class NoOpScheduleManager extends ScheduleManager {
  public NoOpScheduleManager(
      int maxShardId,
      TokenManager tokenManager,
      JobSpecTransformer jobSpecTransformer,
      Clock clock,
      Metrics metrics) {
    super(maxShardId, tokenManager, jobSpecTransformer, clock, metrics);
  }

  /** Shared already-completed (null-valued) result returned by every no-op operation. */
  private static <T> CompletableFuture<T> completedNull() {
    return CompletableFuture.completedFuture(null);
  }

  @Override
  public CompletableFuture<Void> recoverStuckJobs(String partition, Instant instant) {
    return completedNull();
  }

  @Override
  public CompletableFuture<Void> addJob(Schedule schedule) {
    return completedNull();
  }

  @Override
  public CompletableFuture<Schedule> getJob(String token) {
    return completedNull();
  }

  @Override
  public CompletableFuture<Void> deleteJob(String token) {
    return completedNull();
  }

  @Override
  public CompletableFuture<SchedulesQueryResponse> getOverdueJobs(
      String partition, Instant instant) {
    return completedNull();
  }

  @Override
  public CompletableFuture<Schedule> updateStatus(
      Schedule schedule, JobStatus oldStatus, JobStatus newStatus) {
    return completedNull();
  }

  @Override
  public CompletableFuture<Void> deleteDispatchedJob(Schedule schedule) {
    return completedNull();
  }

  /** Nothing to release. */
  @Override
  public void close() {}
}
| 5,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.