index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorEmailBuilder.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.janitor;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.JanitorEmailBuilder;
/** The basic implementation of the email builder for Janitor monkey. */
public class BasicJanitorEmailBuilder extends JanitorEmailBuilder {

    /** Column headers of the resource table in the notification email body. */
    private static final String[] TABLE_COLUMNS =
        {"Resource Type", "Resource", "Region", "Description", "Expected Termination Time",
        "Termination Reason", "View/Edit"};

    /** Template for an HTML anchor: first argument is the href, second is the link text. */
    private static final String AHREF_TEMPLATE = "<a href=\"%s\">%s</a>";

    /** Formats the expected termination date, e.g. "Tue, Jan 01, 2013". */
    private static final DateTimeFormatter DATE_FORMATTER = DateTimeFormat.forPattern("EEE, MMM dd, yyyy");

    /** Maps each recipient email address to the resources it should be notified about. */
    private Map<String, Collection<Resource>> emailToResources;

    /** {@inheritDoc} */
    @Override
    public void setEmailToResources(Map<String, Collection<Resource>> emailToResources) {
        Validate.notNull(emailToResources);
        this.emailToResources = emailToResources;
    }

    /** {@inheritDoc} */
    @Override
    protected String getHeader() {
        StringBuilder header = new StringBuilder();
        header.append("<b><h2>Janitor Notifications</h2></b>");
        header.append(
                "The following resource(s) have been marked for cleanup by Janitor monkey "
                + "as potential unused resources. This is a non-repeating notification.<br/>");
        return header.toString();
    }

    /** {@inheritDoc} */
    @Override
    protected String getEntryTable(String emailAddress) {
        StringBuilder table = new StringBuilder();
        table.append(getHtmlTableHeader(getTableColumns()));
        // Guard against an address that has no mapped resources: render an empty table
        // instead of throwing a NullPointerException.
        Collection<Resource> resources = emailToResources.get(emailAddress);
        if (resources != null) {
            for (Resource resource : resources) {
                table.append(getResourceRow(resource));
            }
        }
        table.append("</table>");
        return table.toString();
    }

    /** {@inheritDoc} */
    @Override
    protected String getFooter() {
        return "<br/>Janitor Monkey wiki: https://github.com/Netflix/SimianArmy/wiki<br/>";
    }

    /**
     * Gets the url to view the details of the resource.
     * Subclasses may override; the base implementation provides no link.
     * @param resource the resource
     * @return the url to view/edit the resource, or null when there is none.
     */
    protected String getResourceUrl(Resource resource) {
        return null;
    }

    /**
     * Gets the string when displaying the resource, e.g. the id.
     * @param resource the resource to display
     * @return the string to represent the resource
     */
    protected String getResourceDisplay(Resource resource) {
        return resource.getId();
    }

    /**
     * Gets the url to edit the Janitor termination of the resource.
     * Subclasses may override; the base implementation provides no link.
     * @param resource the resource
     * @return the url to edit the Janitor termination of the resource, or null when there is none.
     */
    protected String getJanitorResourceUrl(Resource resource) {
        return null;
    }

    /** Gets the table columns for the table in the email.
     *
     * @return the array of column names
     */
    protected String[] getTableColumns() {
        return TABLE_COLUMNS;
    }

    /**
     * Gets the row for a resource in the table in the email body.
     * Null-valued optional fields (description, expected termination time,
     * termination reason) are rendered as empty cells.
     * @param resource the resource to display
     * @return the table row in the email body
     */
    protected String getResourceRow(Resource resource) {
        StringBuilder message = new StringBuilder();
        message.append("<tr>");
        message.append(getHtmlCell(resource.getResourceType().name()));
        // Link the resource id when a detail url is available, otherwise plain text.
        String resourceUrl = getResourceUrl(resource);
        if (!StringUtils.isEmpty(resourceUrl)) {
            message.append(getHtmlCell(String.format(AHREF_TEMPLATE, resourceUrl, getResourceDisplay(resource))));
        } else {
            message.append(getHtmlCell(getResourceDisplay(resource)));
        }
        message.append(getHtmlCell(resource.getRegion()));
        if (resource.getDescription() == null) {
            message.append(getHtmlCell(""));
        } else {
            // Description fields pack multiple values separated by ';' or ','; show one per line.
            message.append(getHtmlCell(resource.getDescription().replace(";", "<br/>").replace(",", "<br/>")));
        }
        // The termination time may not be set yet (e.g. resource not fully marked);
        // render an empty cell instead of throwing a NullPointerException.
        if (resource.getExpectedTerminationTime() == null) {
            message.append(getHtmlCell(""));
        } else {
            message.append(getHtmlCell(DATE_FORMATTER.print(resource.getExpectedTerminationTime().getTime())));
        }
        // defaultString turns a null reason into "" instead of the literal text "null".
        message.append(getHtmlCell(StringUtils.defaultString(resource.getTerminationReason())));
        String janitorUrl = getJanitorResourceUrl(resource);
        if (!StringUtils.isEmpty(janitorUrl)) {
            message.append(getHtmlCell(String.format(AHREF_TEMPLATE, janitorUrl, "View/Extend")));
        } else {
            message.append(getHtmlCell(""));
        }
        message.append("</tr>");
        return message.toString();
    }
}
| 4,800 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/janitor/BasicVolumeTaggingMonkeyContext.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.janitor;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.aws.janitor.VolumeTaggingMonkey;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.aws.AWSClient;
import org.apache.commons.lang.StringUtils;
import java.util.Collection;
/** The basic context for the monkey that tags volumes with Janitor meta data.
*/
public class BasicVolumeTaggingMonkeyContext extends BasicSimianArmyContext implements VolumeTaggingMonkey.Context {

    /** One AWS client per configured region. */
    private final Collection<AWSClient> awsClients = Lists.newArrayList();

    /**
     * The constructor. Builds an AWS client for every region listed in the
     * comma-separated region configuration.
     */
    public BasicVolumeTaggingMonkeyContext() {
        super("simianarmy.properties", "client.properties", "volumeTagging.properties");
        final String[] regions = StringUtils.split(region(), ",");
        for (String regionName : regions) {
            // Presumably createClient(...) makes the new client the one returned by
            // awsClient(); the add below relies on that -- TODO confirm in base class.
            createClient(regionName);
            awsClients.add(awsClient());
        }
    }

    /** Returns the AWS clients created for each configured region. */
    @Override
    public Collection<AWSClient> awsClients() {
        return awsClients;
    }
}
| 4,801 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorMonkeyContext.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// CHECKSTYLE IGNORE MagicNumberCheck
package com.netflix.simianarmy.basic.janitor;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.guice.EurekaModule;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.aws.janitor.*;
import com.netflix.simianarmy.aws.janitor.crawler.*;
import com.netflix.simianarmy.aws.janitor.crawler.edda.*;
import com.netflix.simianarmy.aws.janitor.rule.ami.UnusedImageRule;
import com.netflix.simianarmy.aws.janitor.rule.asg.*;
import com.netflix.simianarmy.aws.janitor.rule.elb.OrphanedELBRule;
import com.netflix.simianarmy.aws.janitor.rule.generic.TagValueExclusionRule;
import com.netflix.simianarmy.aws.janitor.rule.generic.UntaggedRule;
import com.netflix.simianarmy.aws.janitor.rule.instance.OrphanedInstanceRule;
import com.netflix.simianarmy.aws.janitor.rule.launchconfig.OldUnusedLaunchConfigRule;
import com.netflix.simianarmy.aws.janitor.rule.snapshot.NoGeneratedAMIRule;
import com.netflix.simianarmy.aws.janitor.rule.volume.DeleteOnTerminationRule;
import com.netflix.simianarmy.aws.janitor.rule.volume.OldDetachedVolumeRule;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* The basic implementation of the context class for Janitor monkey.
*/
public class BasicJanitorMonkeyContext extends BasicSimianArmyContext implements JanitorMonkey.Context {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicJanitorMonkeyContext.class);

    /** The email notifier used to warn resource owners before termination. */
    private final JanitorEmailNotifier emailNotifier;

    /** Tracker that persists the janitor state of marked resources (SimpleDB or RDS backed). */
    private final JanitorResourceTracker janitorResourceTracker;

    /** The janitors, one per enabled resource type. */
    private final List<AbstractJanitor> janitors;

    /** The region this monkey instance operates in. */
    private final String monkeyRegion;

    /** Calendar used by rules to compute retention and termination dates. */
    private final MonkeyCalendar monkeyCalendar;

    /** SES client used to send notification emails. */
    private final AmazonSimpleEmailServiceClient sesClient;

    /** Builder for the HTML body of notification emails. */
    private final JanitorEmailBuilder janitorEmailBuilder;

    /** Fallback notification address used when a resource has no owner email. */
    private final String defaultEmail;

    /** Addresses CC'ed on every notification. */
    private final String[] ccEmails;

    /** The "from" address of notification emails. */
    private final String sourceEmail;

    /** Domain appended to bare owner names to form an email address. */
    private final String ownerEmailDomain;

    /** Days of advance notice given before a resource is terminated. */
    private final int daysBeforeTermination;

    /**
     * The constructor. Reads the janitor configuration, selects the resource
     * tracker implementation, configures email notification, and instantiates
     * one janitor per enabled resource type.
     */
    public BasicJanitorMonkeyContext() {
        super("simianarmy.properties", "client.properties", "janitor.properties");

        monkeyRegion = region();
        monkeyCalendar = calendar();

        String resourceDomain = configuration().getStrOrElse("simianarmy.janitor.resources.sdb.domain", "SIMIAN_ARMY");

        Set<String> enabledResourceSet = getEnabledResourceSet();

        String dbDriver = configuration().getStr("simianarmy.recorder.db.driver");
        String dbUser = configuration().getStr("simianarmy.recorder.db.user");
        String dbPass = configuration().getStr("simianarmy.recorder.db.pass");
        String dbUrl = configuration().getStr("simianarmy.recorder.db.url");
        String dbTable = configuration().getStr("simianarmy.janitor.resources.db.table");

        // Without a JDBC driver configured, fall back to the SimpleDB-backed tracker.
        if (dbDriver == null) {
            janitorResourceTracker = new SimpleDBJanitorResourceTracker(awsClient(), resourceDomain);
        } else {
            RDSJanitorResourceTracker rdsTracker =
                    new RDSJanitorResourceTracker(dbDriver, dbUser, dbPass, dbUrl, dbTable);
            rdsTracker.init();
            janitorResourceTracker = rdsTracker;
        }

        janitorEmailBuilder = new BasicJanitorEmailBuilder();
        sesClient = new AmazonSimpleEmailServiceClient();
        if (configuration().getStr("simianarmy.aws.email.region") != null) {
            sesClient.setRegion(
                    Region.getRegion(Regions.fromName(configuration().getStr("simianarmy.aws.email.region"))));
        }
        defaultEmail = configuration().getStrOrElse("simianarmy.janitor.notification.defaultEmail", "");
        ccEmails = StringUtils.split(
                configuration().getStrOrElse("simianarmy.janitor.notification.ccEmails", ""), ",");
        sourceEmail = configuration().getStrOrElse("simianarmy.janitor.notification.sourceEmail", "");
        ownerEmailDomain = configuration().getStrOrElse("simianarmy.janitor.notification.ownerEmailDomain", "");
        daysBeforeTermination =
                (int) configuration().getNumOrElse("simianarmy.janitor.notification.daysBeforeTermination", 3);

        emailNotifier = new JanitorEmailNotifier(getJanitorEmailNotifierContext());

        // Instantiate a janitor for each resource type enabled in configuration.
        janitors = new ArrayList<AbstractJanitor>();
        if (enabledResourceSet.contains("ASG")) {
            janitors.add(getASGJanitor());
        }
        if (enabledResourceSet.contains("INSTANCE")) {
            janitors.add(getInstanceJanitor());
        }
        if (enabledResourceSet.contains("EBS_VOLUME")) {
            janitors.add(getEBSVolumeJanitor());
        }
        if (enabledResourceSet.contains("EBS_SNAPSHOT")) {
            janitors.add(getEBSSnapshotJanitor());
        }
        if (enabledResourceSet.contains("LAUNCH_CONFIG")) {
            janitors.add(getLaunchConfigJanitor());
        }
        if (enabledResourceSet.contains("IMAGE")) {
            janitors.add(getImageJanitor());
        }
        if (enabledResourceSet.contains("ELB")) {
            janitors.add(getELBJanitor());
        }
    }

    /**
     * Creates a rule engine pre-populated with the globally applicable exclusion
     * rules (currently only the tag-value exclusion rule, when enabled).
     * @return a new rule engine
     */
    protected JanitorRuleEngine createJanitorRuleEngine() {
        JanitorRuleEngine ruleEngine = new BasicJanitorRuleEngine();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.TagValueExclusionRule.enabled", false)) {
            String tagsList = configuration().getStr("simianarmy.janitor.rule.TagValueExclusionRule.tags");
            String valsList = configuration().getStr("simianarmy.janitor.rule.TagValueExclusionRule.vals");
            if (tagsList != null && valsList != null) {
                TagValueExclusionRule rule = new TagValueExclusionRule(tagsList.split(","), valsList.split(","));
                ruleEngine.addExclusionRule(rule);
            }
        }
        return ruleEngine;
    }

    /**
     * Adds the UntaggedRule to the given rule engine if the rule is enabled
     * and configured to apply to the given resource type. Consolidates the
     * identical registration previously duplicated in every janitor factory.
     * @param ruleEngine the rule engine to add the rule to
     * @param resourceType the resource type name, e.g. "ASG" or "INSTANCE"
     */
    private void addUntaggedRuleIfEnabled(JanitorRuleEngine ruleEngine, String resourceType) {
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)
                && getUntaggedRuleResourceSet().contains(resourceType)) {
            ruleEngine.addRule(new UntaggedRule(monkeyCalendar,
                    getPropertySet("simianarmy.janitor.rule.untaggedRule.requiredTags"),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.untaggedRule.retentionDaysWithOwner", 3),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.untaggedRule.retentionDaysWithoutOwner",
                            8)));
        }
    }

    /**
     * Builds the janitor for auto scaling groups.
     * @return the ASG janitor
     */
    private ASGJanitor getASGJanitor() {
        JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
        boolean discoveryEnabled = configuration().getBoolOrElse("simianarmy.janitor.Eureka.enabled", false);
        ASGInstanceValidator instanceValidator;
        if (discoveryEnabled) {
            LOGGER.info("Initializing Discovery client.");
            Injector injector = Guice.createInjector(new EurekaModule());
            DiscoveryClient discoveryClient = injector.getInstance(DiscoveryClient.class);
            instanceValidator = new DiscoveryASGInstanceValidator(discoveryClient);
        } else {
            LOGGER.info("Discovery/Eureka is not enabled, use the dummy instance validator.");
            instanceValidator = new DummyASGInstanceValidator();
        }
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.oldEmptyASGRule.enabled", false)) {
            ruleEngine.addRule(new OldEmptyASGRule(monkeyCalendar,
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.oldEmptyASGRule.launchConfigAgeThreshold", 50),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.oldEmptyASGRule.retentionDays", 10),
                    instanceValidator
            ));
        }
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.suspendedASGRule.enabled", false)) {
            ruleEngine.addRule(new SuspendedASGRule(monkeyCalendar,
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.suspendedASGRule.suspensionAgeThreshold", 2),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.suspendedASGRule.retentionDays", 5),
                    instanceValidator
            ));
        }
        addUntaggedRuleIfEnabled(ruleEngine, "ASG");
        JanitorCrawler crawler;
        if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
            crawler = new EddaASGJanitorCrawler(createEddaClient(), awsClient().region());
        } else {
            crawler = new ASGJanitorCrawler(awsClient());
        }
        BasicJanitorContext asgJanitorCtx = new BasicJanitorContext(
                monkeyRegion, ruleEngine, crawler, janitorResourceTracker,
                monkeyCalendar, configuration(), recorder());
        return new ASGJanitor(awsClient(), asgJanitorCtx);
    }

    /**
     * Builds the janitor for EC2 instances.
     * @return the instance janitor
     */
    private InstanceJanitor getInstanceJanitor() {
        JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.orphanedInstanceRule.enabled", false)) {
            ruleEngine.addRule(new OrphanedInstanceRule(monkeyCalendar,
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.orphanedInstanceRule.instanceAgeThreshold", 2),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.orphanedInstanceRule.retentionDaysWithOwner", 3),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.orphanedInstanceRule.retentionDaysWithoutOwner",
                            8),
                    configuration().getBoolOrElse(
                            "simianarmy.janitor.rule.orphanedInstanceRule.opsworks.parentage",
                            false)));
        }
        addUntaggedRuleIfEnabled(ruleEngine, "INSTANCE");
        JanitorCrawler instanceCrawler;
        if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
            instanceCrawler = new EddaInstanceJanitorCrawler(createEddaClient(), awsClient().region());
        } else {
            instanceCrawler = new InstanceJanitorCrawler(awsClient());
        }
        BasicJanitorContext instanceJanitorCtx = new BasicJanitorContext(
                monkeyRegion, ruleEngine, instanceCrawler, janitorResourceTracker,
                monkeyCalendar, configuration(), recorder());
        return new InstanceJanitor(awsClient(), instanceJanitorCtx);
    }

    /**
     * Builds the janitor for EBS volumes.
     * @return the EBS volume janitor
     */
    private EBSVolumeJanitor getEBSVolumeJanitor() {
        JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.oldDetachedVolumeRule.enabled", false)) {
            ruleEngine.addRule(new OldDetachedVolumeRule(monkeyCalendar,
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.oldDetachedVolumeRule.detachDaysThreshold", 30),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.oldDetachedVolumeRule.retentionDays", 7)));
            // NOTE(review): the deleteOnTerminationRule is only considered when
            // oldDetachedVolumeRule is also enabled -- confirm this nesting is intentional.
            if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)
                    && configuration().getBoolOrElse("simianarmy.janitor.rule.deleteOnTerminationRule.enabled", false)) {
                ruleEngine.addRule(new DeleteOnTerminationRule(monkeyCalendar, (int) configuration().getNumOrElse(
                        "simianarmy.janitor.rule.deleteOnTerminationRule.retentionDays", 3)));
            }
        }
        addUntaggedRuleIfEnabled(ruleEngine, "EBS_VOLUME");
        JanitorCrawler volumeCrawler;
        if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
            volumeCrawler = new EddaEBSVolumeJanitorCrawler(createEddaClient(), awsClient().region());
        } else {
            volumeCrawler = new EBSVolumeJanitorCrawler(awsClient());
        }
        BasicJanitorContext volumeJanitorCtx = new BasicJanitorContext(
                monkeyRegion, ruleEngine, volumeCrawler, janitorResourceTracker,
                monkeyCalendar, configuration(), recorder());
        return new EBSVolumeJanitor(awsClient(), volumeJanitorCtx);
    }

    /**
     * Builds the janitor for EBS snapshots.
     * @return the EBS snapshot janitor
     */
    private EBSSnapshotJanitor getEBSSnapshotJanitor() {
        JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.noGeneratedAMIRule.enabled", false)) {
            ruleEngine.addRule(new NoGeneratedAMIRule(monkeyCalendar,
                    (int) configuration().getNumOrElse("simianarmy.janitor.rule.noGeneratedAMIRule.ageThreshold", 30),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.noGeneratedAMIRule.retentionDays", 7),
                    configuration().getStrOrElse(
                            "simianarmy.janitor.rule.noGeneratedAMIRule.ownerEmail", null)));
        }
        addUntaggedRuleIfEnabled(ruleEngine, "EBS_SNAPSHOT");
        JanitorCrawler snapshotCrawler;
        if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
            snapshotCrawler = new EddaEBSSnapshotJanitorCrawler(
                    configuration().getStr("simianarmy.janitor.snapshots.ownerId"),
                    createEddaClient(), awsClient().region());
        } else {
            snapshotCrawler = new EBSSnapshotJanitorCrawler(awsClient());
        }
        BasicJanitorContext snapshotJanitorCtx = new BasicJanitorContext(
                monkeyRegion, ruleEngine, snapshotCrawler, janitorResourceTracker,
                monkeyCalendar, configuration(), recorder());
        return new EBSSnapshotJanitor(awsClient(), snapshotJanitorCtx);
    }

    /**
     * Builds the janitor for launch configurations.
     * @return the launch config janitor
     */
    private LaunchConfigJanitor getLaunchConfigJanitor() {
        JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.oldUnusedLaunchConfigRule.enabled", false)) {
            ruleEngine.addRule(new OldUnusedLaunchConfigRule(monkeyCalendar,
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.oldUnusedLaunchConfigRule.ageThreshold", 4),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.oldUnusedLaunchConfigRule.retentionDays", 3)));
        }
        addUntaggedRuleIfEnabled(ruleEngine, "LAUNCH_CONFIG");
        JanitorCrawler crawler;
        if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
            crawler = new EddaLaunchConfigJanitorCrawler(
                    createEddaClient(), awsClient().region());
        } else {
            crawler = new LaunchConfigJanitorCrawler(awsClient());
        }
        BasicJanitorContext janitorCtx = new BasicJanitorContext(
                monkeyRegion, ruleEngine, crawler, janitorResourceTracker,
                monkeyCalendar, configuration(), recorder());
        return new LaunchConfigJanitor(awsClient(), janitorCtx);
    }

    /**
     * Builds the janitor for AMIs. Requires Edda; there is no AWS-API-only crawler.
     * @return the image janitor
     * @throws RuntimeException if Edda is not enabled
     */
    private ImageJanitor getImageJanitor() {
        JanitorCrawler crawler;
        if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
            crawler = new EddaImageJanitorCrawler(createEddaClient(),
                    configuration().getStr("simianarmy.janitor.image.ownerId"),
                    (int) configuration().getNumOrElse("simianarmy.janitor.image.crawler.lookBackDays", 60),
                    awsClient().region());
        } else {
            throw new RuntimeException("Image Janitor only works when Edda is enabled.");
        }
        JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.unusedImageRule.enabled", false)) {
            ruleEngine.addRule(new UnusedImageRule(monkeyCalendar,
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.unusedImageRule.retentionDays", 3),
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.unusedImageRule.lastReferenceDaysThreshold", 45)));
        }
        addUntaggedRuleIfEnabled(ruleEngine, "IMAGE");
        BasicJanitorContext janitorCtx = new BasicJanitorContext(
                monkeyRegion, ruleEngine, crawler, janitorResourceTracker,
                monkeyCalendar, configuration(), recorder());
        return new ImageJanitor(awsClient(), janitorCtx);
    }

    /**
     * Builds the janitor for elastic load balancers.
     * @return the ELB janitor
     */
    private ELBJanitor getELBJanitor() {
        JanitorRuleEngine ruleEngine = createJanitorRuleEngine();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.orphanedELBRule.enabled", false)) {
            ruleEngine.addRule(new OrphanedELBRule(monkeyCalendar,
                    (int) configuration().getNumOrElse(
                            "simianarmy.janitor.rule.orphanedELBRule.retentionDays", 7)));
        }
        JanitorCrawler elbCrawler;
        if (configuration().getBoolOrElse("simianarmy.janitor.edda.enabled", false)) {
            boolean useEddaApplicationOwner = configuration().getBoolOrElse(
                    "simianarmy.janitor.rule.orphanedELBRule.edda.useApplicationOwner", false);
            String eddaFallbackOwnerEmail = configuration().getStr(
                    "simianarmy.janitor.rule.orphanedELBRule.edda.fallbackOwnerEmail");
            elbCrawler = new EddaELBJanitorCrawler(
                    createEddaClient(), eddaFallbackOwnerEmail, useEddaApplicationOwner, awsClient().region());
        } else {
            elbCrawler = new ELBJanitorCrawler(awsClient());
        }
        BasicJanitorContext elbJanitorCtx = new BasicJanitorContext(
                monkeyRegion, ruleEngine, elbCrawler, janitorResourceTracker,
                monkeyCalendar, configuration(), recorder());
        return new ELBJanitor(awsClient(), elbJanitorCtx);
    }

    /**
     * Creates an Edda client using the configured timeout/retry settings.
     * @return a new Edda client
     */
    private EddaClient createEddaClient() {
        return new EddaClient((int) configuration().getNumOrElse("simianarmy.janitor.edda.client.timeout", 30000),
                (int) configuration().getNumOrElse("simianarmy.janitor.edda.client.retries", 3),
                (int) configuration().getNumOrElse("simianarmy.janitor.edda.client.retryInterval", 1000),
                configuration());
    }

    /**
     * Parses the comma-separated list of enabled resource types, normalized to upper case.
     * @return the set of enabled resource type names
     */
    private Set<String> getEnabledResourceSet() {
        Set<String> enabledResourceSet = new HashSet<String>();
        String enabledResources = configuration().getStr("simianarmy.janitor.enabledResources");
        if (StringUtils.isNotBlank(enabledResources)) {
            for (String resourceType : enabledResources.split(",")) {
                enabledResourceSet.add(resourceType.trim().toUpperCase());
            }
        }
        return enabledResourceSet;
    }

    /**
     * Parses the resource types the untagged rule applies to (upper-cased);
     * empty when the rule is disabled.
     * @return the set of resource type names the untagged rule applies to
     */
    private Set<String> getUntaggedRuleResourceSet() {
        Set<String> untaggedRuleResourceSet = new HashSet<String>();
        if (configuration().getBoolOrElse("simianarmy.janitor.rule.untaggedRule.enabled", false)) {
            String untaggedRuleResources = configuration().getStr("simianarmy.janitor.rule.untaggedRule.resources");
            if (StringUtils.isNotBlank(untaggedRuleResources)) {
                for (String resourceType : untaggedRuleResources.split(",")) {
                    untaggedRuleResourceSet.add(resourceType.trim().toUpperCase());
                }
            }
        }
        return untaggedRuleResourceSet;
    }

    /**
     * Parses a comma-separated configuration property into a set of trimmed values.
     * @param property the configuration property name
     * @return the set of values, empty when the property is blank or missing
     */
    private Set<String> getPropertySet(String property) {
        Set<String> propertyValueSet = new HashSet<String>();
        String propertyValue = configuration().getStr(property);
        if (StringUtils.isNotBlank(propertyValue)) {
            for (String propertyValueItem : propertyValue.split(",")) {
                propertyValueSet.add(propertyValueItem.trim());
            }
        }
        return propertyValueSet;
    }

    /**
     * Builds the context object the email notifier reads its settings from;
     * all values are captured from this monkey context's fields.
     * @return the email notifier context
     */
    public JanitorEmailNotifier.Context getJanitorEmailNotifierContext() {
        return new JanitorEmailNotifier.Context() {
            @Override
            public AmazonSimpleEmailServiceClient sesClient() {
                return sesClient;
            }

            @Override
            public String defaultEmail() {
                return defaultEmail;
            }

            @Override
            public int daysBeforeTermination() {
                return daysBeforeTermination;
            }

            @Override
            public String region() {
                return monkeyRegion;
            }

            @Override
            public JanitorResourceTracker resourceTracker() {
                return janitorResourceTracker;
            }

            @Override
            public JanitorEmailBuilder emailBuilder() {
                return janitorEmailBuilder;
            }

            @Override
            public MonkeyCalendar calendar() {
                return monkeyCalendar;
            }

            @Override
            public String[] ccEmails() {
                return ccEmails;
            }

            @Override
            public String sourceEmail() {
                return sourceEmail;
            }

            @Override
            public String ownerEmailDomain() {
                return ownerEmailDomain;
            }
        };
    }

    /** {@inheritDoc} */
    @Override
    public List<AbstractJanitor> janitors() {
        return janitors;
    }

    /** {@inheritDoc} */
    @Override
    public JanitorEmailNotifier emailNotifier() {
        return emailNotifier;
    }

    /** {@inheritDoc} */
    @Override
    public JanitorResourceTracker resourceTracker() {
        return janitorResourceTracker;
    }

    /** The Context class for Janitor.
     */
    public static class BasicJanitorContext implements AbstractJanitor.Context {
        private final String region;
        private final JanitorRuleEngine ruleEngine;
        private final JanitorCrawler crawler;
        private final JanitorResourceTracker resourceTracker;
        private final MonkeyCalendar calendar;
        private final MonkeyConfiguration config;
        private final MonkeyRecorder recorder;

        /**
         * Constructor.
         * @param region the region of the janitor
         * @param ruleEngine the rule engine used by the janitor
         * @param crawler the crawler used by the janitor
         * @param resourceTracker the resource tracker used by the janitor
         * @param calendar the calendar used by the janitor
         * @param config the monkey configuration used by the janitor
         * @param recorder the event recorder used by the janitor
         */
        public BasicJanitorContext(String region, JanitorRuleEngine ruleEngine, JanitorCrawler crawler,
                                   JanitorResourceTracker resourceTracker, MonkeyCalendar calendar,
                                   MonkeyConfiguration config, MonkeyRecorder recorder) {
            this.region = region;
            this.resourceTracker = resourceTracker;
            this.ruleEngine = ruleEngine;
            this.crawler = crawler;
            this.calendar = calendar;
            this.config = config;
            this.recorder = recorder;
        }

        @Override
        public String region() {
            return region;
        }

        @Override
        public MonkeyConfiguration configuration() {
            return config;
        }

        @Override
        public MonkeyCalendar calendar() {
            return calendar;
        }

        @Override
        public JanitorRuleEngine janitorRuleEngine() {
            return ruleEngine;
        }

        @Override
        public JanitorCrawler janitorCrawler() {
            return crawler;
        }

        @Override
        public JanitorResourceTracker janitorResourceTracker() {
            return resourceTracker;
        }

        @Override
        public MonkeyRecorder recorder() {
            return recorder;
        }
    }
}
| 4,802 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/janitor/BasicJanitorRuleEngine.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.JanitorRuleEngine;
import com.netflix.simianarmy.janitor.Rule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
* Basic implementation of janitor rule engine that runs all containing rules to decide if a resource should be
* a candidate of cleanup.
*/
public class BasicJanitorRuleEngine implements JanitorRuleEngine {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicJanitorRuleEngine.class);

    /** Rules that can mark a resource as a candidate for cleanup. **/
    private final List<Rule> rules = new ArrayList<Rule>();

    /** Rules that can exempt a resource from cleanup entirely. **/
    private final List<Rule> exclusionRules = new ArrayList<Rule>();

    /**
     * The constructor of JanitorRuleEngine. Starts with no rules registered.
     */
    public BasicJanitorRuleEngine() {
    }

    /**
     * Decides whether the resource should be a candidate of cleanup based on the underlying rules.
     * Exclusion rules are checked first; any match keeps the resource. Otherwise, among all rules
     * that would mark the resource, the one with the earliest expected termination time is applied
     * to the original resource so it fills in the termination reason and time.
     *
     * @param resource
     *            The resource
     * @return true if the resource is valid and should not be a candidate of cleanup based on the
     *         underlying rules, false otherwise.
     */
    @Override
    public boolean isValid(Resource resource) {
        LOGGER.debug(String.format("Checking if resource %s of type %s is a cleanup candidate against %d rules and %d exclusion rules.",
                resource.getId(), resource.getResourceType(), rules.size(), exclusionRules.size()));
        for (Rule exclusionRule : exclusionRules) {
            if (exclusionRule.isValid(resource)) {
                LOGGER.info(String.format("Resource %s is not marked as a cleanup candidate because of an exclusion rule.", resource.getId()));
                return true;
            }
        }
        Rule decidingRule = findRuleWithNearestTermination(resource);
        if (decidingRule != null && !decidingRule.isValid(resource)) {
            LOGGER.info(String.format("Resource %s is marked as a cleanup candidate.", resource.getId()));
            return false;
        }
        LOGGER.info(String.format("Resource %s is not marked as a cleanup candidate.", resource.getId()));
        return true;
    }

    /**
     * Dry-runs each rule against a clone of the resource and returns the rule whose
     * expected termination time is the nearest, or null when no rule would mark it.
     * With exactly one rule registered, the clone pass is skipped as an optimization.
     */
    private Rule findRuleWithNearestTermination(Resource resource) {
        if (rules.size() == 1) {
            return rules.get(0);
        }
        Rule nearest = null;
        Date nearestTime = null;
        for (Rule candidate : rules) {
            // Clone so a dry run does not mutate the original resource.
            Resource copy = resource.cloneResource();
            if (candidate.isValid(copy) || copy.getExpectedTerminationTime() == null) {
                continue;
            }
            if (nearestTime == null || nearestTime.after(copy.getExpectedTerminationTime())) {
                nearest = candidate;
                nearestTime = copy.getExpectedTerminationTime();
            }
        }
        return nearest;
    }

    /** {@inheritDoc} */
    @Override
    public BasicJanitorRuleEngine addRule(Rule rule) {
        rules.add(rule);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public BasicJanitorRuleEngine addExclusionRule(Rule rule) {
        exclusionRules.add(rule);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public List<Rule> getRules() {
        return rules;
    }

    /** {@inheritDoc} */
    @Override
    public List<Rule> getExclusionRules() {
        return exclusionRules;
    }
}
| 4,803 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/chaos/BasicInstanceGroup.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.netflix.simianarmy.GroupType;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
/**
 * The Class BasicInstanceGroup.
 *
 * <p>Holds an instance group's identity (name, type, region and ASG tags) plus the list of
 * instance ids crawled into it. The identity fields are immutable; instances are added via
 * {@link #addInstance(String)}.</p>
 */
public class BasicInstanceGroup implements InstanceGroup {

    /** The name (typically the ASG name). */
    private final String name;

    /** The type. */
    private final GroupType type;

    /** The region the group belongs to. */
    private final String region;

    /** list of the tags of the ASG */
    private final List<TagDescription> tags;

    /** The instance ids belonging to this group; kept with the other fields and final since it is never reassigned. */
    private final List<String> list = new LinkedList<String>();

    /**
     * Instantiates a new basic instance group.
     *
     * @param name
     *            the name
     * @param type
     *            the type
     * @param region
     *            the region the group belongs to
     * @param tags
     *            the ASG tags
     */
    public BasicInstanceGroup(String name, GroupType type, String region, List<TagDescription> tags) {
        this.name = name;
        this.type = type;
        this.region = region;
        this.tags = tags;
    }

    /** {@inheritDoc} */
    @Override
    public GroupType type() {
        return type;
    }

    /** {@inheritDoc} */
    @Override
    public String name() {
        return name;
    }

    /** {@inheritDoc} */
    @Override
    public String region() {
        return region;
    }

    /** {@inheritDoc} */
    @Override
    public List<TagDescription> tags() {
        return tags;
    }

    /** {@inheritDoc} */
    @Override
    public List<String> instances() {
        // Callers get a read-only view; mutation goes through addInstance.
        return Collections.unmodifiableList(list);
    }

    /** {@inheritDoc} */
    @Override
    public void addInstance(String instance) {
        list.add(instance);
    }

    /** {@inheritDoc} */
    @Override
    public BasicInstanceGroup copyAs(String newName) {
        BasicInstanceGroup newGroup = new BasicInstanceGroup(newName, this.type(), this.region(), this.tags());
        for (String instance : this.instances()) {
            newGroup.addInstance(instance);
        }
        return newGroup;
    }
}
| 4,804 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/chaos/CloudFormationChaosMonkey.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import com.netflix.simianarmy.chaos.ChaosType;
/**
 * The Class CloudFormationChaosMonkey. Strips out the random string generated by the CloudFormation api in
 * the instance group name of the ASG we want to kill instances on
 */
public class CloudFormationChaosMonkey extends BasicChaosMonkey {

    /**
     * Instantiates a new cloud formation chaos monkey.
     * @param ctx
     *            the ctx
     */
    public CloudFormationChaosMonkey(Context ctx) {
        super(ctx);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    protected boolean isGroupEnabled(InstanceGroup group) {
        // Delegate using the suffix-stripped group so config lookups match the template name.
        return super.isGroupEnabled(noSuffixInstanceGroup(group));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    protected Event terminateInstance(InstanceGroup group, String inst, ChaosType chaosType) {
        return super.terminateInstance(noSuffixInstanceGroup(group), inst, chaosType);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    protected boolean isMaxTerminationCountExceeded(InstanceGroup group) {
        return super.isMaxTerminationCountExceeded(noSuffixInstanceGroup(group));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    protected double getEffectiveProbability(InstanceGroup group) {
        InstanceGroup stripped = noSuffixInstanceGroup(group);
        // Disabled groups get probability zero; otherwise read it from configuration.
        return super.isGroupEnabled(stripped) ? getEffectiveProbabilityFromCfg(stripped) : 0;
    }

    /**
     * Returns the lastOptInTimeInMilliseconds parameter for a group omitting the
     * randomly generated suffix.
     */
    @Override
    protected long getLastOptInMilliseconds(InstanceGroup group) {
        return super.getLastOptInMilliseconds(noSuffixInstanceGroup(group));
    }

    /**
     * Return a copy of the instance group removing the randomly generated suffix from
     * its name.
     */
    public InstanceGroup noSuffixInstanceGroup(InstanceGroup group) {
        // Drop the final "-<random>" segment CloudFormation appends to the ASG name.
        return group.copyAs(group.name().replaceAll("(-)([^-]*$)", ""));
    }
}
| 4,805 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/chaos/BasicChaosInstanceSelector.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import com.google.common.collect.Lists;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import com.netflix.simianarmy.chaos.ChaosInstanceSelector;
/**
 * The Class BasicChaosInstanceSelector.
 *
 * <p>Selects instances from a group for termination. For a probability {@code p}, the integer
 * part of {@code p} instances are always selected, and one additional instance is selected with
 * probability equal to the fractional part of {@code p}.</p>
 */
public class BasicChaosInstanceSelector implements ChaosInstanceSelector {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicChaosInstanceSelector.class);

    /** The Constant RANDOM. Single source of randomness for all draws in this class. */
    private static final Random RANDOM = new Random();

    /**
     * Logger, this is abstracted so subclasses (for testing) can reset logger to make it less verbose.
     * @return the logger
     */
    protected Logger logger() {
        return LOGGER;
    }

    /** {@inheritDoc} */
    @Override
    public Collection<String> select(InstanceGroup group, double probability) {
        // Whole part: number of instances selected unconditionally.
        // Fractional part: chance of selecting one more instance.
        int n = ((int) probability);
        String selected = selectOneInstance(group, probability - n);
        Collection<String> result = selectNInstances(group.instances(), n, selected);
        if (selected != null) {
            result.add(selected);
        }
        return result;
    }

    /**
     * Randomly picks up to n instances, excluding the already-selected one.
     *
     * @param instances the candidate instance ids
     * @param n the number of instances to pick
     * @param selected an instance id to exclude (may be null)
     * @return the picked instance ids
     */
    private Collection<String> selectNInstances(Collection<String> instances, int n, String selected) {
        logger().info("Randomly selecting {} from {} instances, excluding {}",
                new Object[] {n, instances.size(), selected});
        List<String> copy = Lists.newArrayList();
        for (String instance : instances) {
            if (!instance.equals(selected)) {
                copy.add(instance);
            }
        }
        if (n >= copy.size()) {
            return copy;
        }
        // Use the shared RANDOM so the shuffle draws from the same source as the other selections.
        Collections.shuffle(copy, RANDOM);
        return copy.subList(0, n);
    }

    /**
     * Selects one instance from the group with the given probability.
     *
     * @param group the instance group
     * @param probability the selection probability, must be below 1
     * @return the selected instance id, or null when nothing is selected
     */
    private String selectOneInstance(InstanceGroup group, double probability) {
        Validate.isTrue(probability < 1);
        if (probability <= 0) {
            logger().info("Group {} [type {}] has disabled probability: {}",
                    new Object[] {group.name(), group.type(), probability});
            return null;
        }
        // Consistency fix: draw from the shared RANDOM instead of Math.random(), so the class
        // uses a single random source throughout (distribution is identical, uniform [0, 1)).
        double rand = RANDOM.nextDouble();
        if (rand > probability || group.instances().isEmpty()) {
            logger().info("Group {} [type {}] got lucky: {} > {}",
                    new Object[] {group.name(), group.type(), rand, probability});
            return null;
        }
        return group.instances().get(RANDOM.nextInt(group.instances().size()));
    }
}
| 4,806 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/chaos/BasicChaosEmailNotifier.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import com.netflix.simianarmy.chaos.ChaosEmailNotifier;
import com.netflix.simianarmy.chaos.ChaosType;
/** The basic implementation of the email notifier for Chaos monkey.
*
*/
public class BasicChaosEmailNotifier extends ChaosEmailNotifier {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(BasicChaosEmailNotifier.class);
private final MonkeyConfiguration cfg;
private final String defaultEmail;
private final List<String> ccAddresses;
/** Constructor.
*
* @param cfg the monkey configuration
* @param sesClient the Amazon SES client
* @param defaultEmail the default email address to notify when the group does not have a
* owner email specified
* @param ccAddresses the cc email addresses for notifications
*/
public BasicChaosEmailNotifier(MonkeyConfiguration cfg, AmazonSimpleEmailServiceClient sesClient,
String defaultEmail, String... ccAddresses) {
super(sesClient);
this.cfg = cfg;
this.defaultEmail = defaultEmail;
this.ccAddresses = Arrays.asList(ccAddresses);
}
/**
* Sends an email notification for a termination of instance to a global
* email address.
* @param group the instance group
* @param instanceId the instance id
* @param chaosType the chosen chaos strategy
*/
@Override
public void sendTerminationGlobalNotification(InstanceGroup group, String instanceId, ChaosType chaosType) {
String to = cfg.getStr("simianarmy.chaos.notification.global.receiverEmail");
if (StringUtils.isBlank(to)) {
LOGGER.warn("Global email address was not set, but global email notification was enabled!");
return;
}
LOGGER.info("sending termination notification to global email address {}", to);
buildAndSendEmail(to, group, instanceId, chaosType);
}
/**
* Sends an email notification for a termination of instance to the group
* owner's email address.
* @param group the instance group
* @param instanceId the instance id
* @param chaosType the chosen chaos strategy
*/
@Override
public void sendTerminationNotification(InstanceGroup group, String instanceId, ChaosType chaosType) {
String to = getOwnerEmail(group);
LOGGER.info("sending termination notification to group owner email address {}", to);
buildAndSendEmail(to, group, instanceId, chaosType);
}
/**
* Gets the owner's email for a instance group.
* @param group the instance group
* @return the owner email of the instance group
*/
protected String getOwnerEmail(InstanceGroup group) {
String prop = String.format("simianarmy.chaos.%s.%s.ownerEmail", group.type(), group.name());
String ownerEmail = cfg.getStr(prop);
if (ownerEmail == null) {
LOGGER.info(String.format("Property %s is not set, use the default email address %s as"
+ " the owner email of group %s of type %s",
prop, defaultEmail, group.name(), group.type()));
return defaultEmail;
} else {
return ownerEmail;
}
}
/**
* Builds the body and subject for the email, sends the email.
* @param group
* the instance group
* @param instanceId
* the instance id
* @param to
* the email address to be sent to
* @param chaosType the chosen chaos strategy
*/
public void buildAndSendEmail(String to, InstanceGroup group, String instanceId, ChaosType chaosType) {
String body = buildEmailBody(group, instanceId, chaosType);
String subject;
boolean emailSubjectIsBody = cfg.getBoolOrElse(
"simianarmy.chaos.notification.subject.isBody", false);
if (emailSubjectIsBody) {
subject = body;
} else {
subject = buildEmailSubject(to);
}
sendEmail(to, subject, body);
}
@Override
public String buildEmailSubject(String to) {
String emailSubjectPrefix = cfg.getStrOrElse("simianarmy.chaos.notification.subject.prefix", "");
String emailSubjectSuffix = cfg.getStrOrElse("simianarmy.chaos.notification.subject.suffix", "");
return String.format("%sChaos Monkey Termination Notification for %s%s",
emailSubjectPrefix, to, emailSubjectSuffix);
}
/**
* Builds the body for the email.
* @param group
* the instance group
* @param instanceId
* the instance id
* @param chaosType the chosen chaos strategy
* @return the created string
*/
public String buildEmailBody(InstanceGroup group, String instanceId, ChaosType chaosType) {
String emailBodyPrefix = cfg.getStrOrElse("simianarmy.chaos.notification.body.prefix", "");
String emailBodySuffix = cfg.getStrOrElse("simianarmy.chaos.notification.body.suffix", "");
String body = emailBodyPrefix;
body += String.format("Instance %s of %s %s is being terminated by Chaos monkey.",
instanceId, group.type(), group.name());
if (chaosType != null) {
body += "\n";
body += String.format("Chaos type: %s.", chaosType.getKey());
}
body += emailBodySuffix;
return body;
}
@Override
public String[] getCcAddresses(String to) {
return ccAddresses.toArray(new String[ccAddresses.size()]);
}
@Override
public String getSourceAddress(String to) {
String prop = "simianarmy.chaos.notification.sourceEmail";
String sourceEmail = cfg.getStr(prop);
if (sourceEmail == null || !isValidEmail(sourceEmail)) {
String msg = String.format("Property %s is not set or its value is not a valid email.", prop);
LOGGER.error(msg);
throw new RuntimeException(msg);
}
return sourceEmail;
}
} | 4,807 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/basic/chaos/BasicChaosMonkey.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.basic.chaos;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.*;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.chaos.*;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
* The Class BasicChaosMonkey.
*/
public class BasicChaosMonkey extends ChaosMonkey {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(BasicChaosMonkey.class);
/** The Constant NS. */
private static final String NS = "simianarmy.chaos.";
/** The cfg. */
private final MonkeyConfiguration cfg;
/** The runs per day. */
private final long runsPerDay;
/** The minimum value of the maxTerminationCountPerday property to be considered non-zero. **/
private static final double MIN_MAX_TERMINATION_COUNT_PER_DAY = 0.001;
private final MonkeyCalendar monkeyCalendar;
// When a mandatory termination is triggered due to the minimum termination limit is breached,
// the value below is used as the termination probability.
private static final double DEFAULT_MANDATORY_TERMINATION_PROBABILITY = 0.5;
private final List<ChaosType> allChaosTypes;
/**
* Instantiates a new basic chaos monkey.
* @param ctx
* the ctx
*/
public BasicChaosMonkey(ChaosMonkey.Context ctx) {
super(ctx);
this.cfg = ctx.configuration();
this.monkeyCalendar = ctx.calendar();
Calendar open = monkeyCalendar.now();
Calendar close = monkeyCalendar.now();
open.set(Calendar.HOUR, monkeyCalendar.openHour());
close.set(Calendar.HOUR, monkeyCalendar.closeHour());
allChaosTypes = Lists.newArrayList();
allChaosTypes.add(new ShutdownInstanceChaosType(cfg));
allChaosTypes.add(new BlockAllNetworkTrafficChaosType(cfg));
allChaosTypes.add(new DetachVolumesChaosType(cfg));
allChaosTypes.add(new BurnCpuChaosType(cfg));
allChaosTypes.add(new BurnIoChaosType(cfg));
allChaosTypes.add(new KillProcessesChaosType(cfg));
allChaosTypes.add(new NullRouteChaosType(cfg));
allChaosTypes.add(new FailEc2ChaosType(cfg));
allChaosTypes.add(new FailDnsChaosType(cfg));
allChaosTypes.add(new FailDynamoDbChaosType(cfg));
allChaosTypes.add(new FailS3ChaosType(cfg));
allChaosTypes.add(new FillDiskChaosType(cfg));
allChaosTypes.add(new NetworkCorruptionChaosType(cfg));
allChaosTypes.add(new NetworkLatencyChaosType(cfg));
allChaosTypes.add(new NetworkLossChaosType(cfg));
TimeUnit freqUnit = ctx.scheduler().frequencyUnit();
if (TimeUnit.DAYS == freqUnit) {
runsPerDay = ctx.scheduler().frequency();
} else {
long units = freqUnit.convert(close.getTimeInMillis() - open.getTimeInMillis(), TimeUnit.MILLISECONDS);
runsPerDay = units / ctx.scheduler().frequency();
}
}
/** {@inheritDoc} */
@Override
public void doMonkeyBusiness() {
context().resetEventReport();
cfg.reload();
if (!isChaosMonkeyEnabled()) {
return;
}
for (InstanceGroup group : context().chaosCrawler().groups()) {
if (isGroupEnabled(group)) {
if (isMaxTerminationCountExceeded(group)) {
continue;
}
double prob = getEffectiveProbability(group);
Collection<String> instances = context().chaosInstanceSelector().select(group, prob / runsPerDay);
for (String inst : instances) {
if (isMaxTerminationCountExceeded(group)) {
break;
}
ChaosType chaosType = pickChaosType(context().cloudClient(), inst);
if (chaosType == null) {
// This is surprising ... normally we can always just terminate it
LOGGER.warn("No chaos type was applicable to the instance: {}", inst);
continue;
}
terminateInstance(group, inst, chaosType);
}
}
}
}
private ChaosType pickChaosType(CloudClient cloudClient, String instanceId) {
Random random = new Random();
SshConfig sshConfig = new SshConfig(cfg);
ChaosInstance instance = new ChaosInstance(cloudClient, instanceId, sshConfig);
List<ChaosType> applicable = Lists.newArrayList();
for (ChaosType chaosType : allChaosTypes) {
if (chaosType.isEnabled() && chaosType.canApply(instance)) {
applicable.add(chaosType);
}
}
if (applicable.isEmpty()) {
return null;
}
int index = random.nextInt(applicable.size());
return applicable.get(index);
}
@Override
public Event terminateNow(String type, String name, ChaosType chaosType)
throws FeatureNotEnabledException, InstanceGroupNotFoundException {
Validate.notNull(type);
Validate.notNull(name);
cfg.reload(name);
if (!isChaosMonkeyEnabled()) {
String msg = String.format("Chaos monkey is not enabled for group %s [type %s]",
name, type);
LOGGER.info(msg);
throw new FeatureNotEnabledException(msg);
}
String prop = NS + "terminateOndemand.enabled";
if (cfg.getBool(prop)) {
InstanceGroup group = findInstanceGroup(type, name);
if (group == null) {
throw new InstanceGroupNotFoundException(type, name);
}
Collection<String> instances = context().chaosInstanceSelector().select(group, 1.0);
Validate.isTrue(instances.size() <= 1);
if (instances.size() == 1) {
return terminateInstance(group, instances.iterator().next(), chaosType);
} else {
throw new NotFoundException(String.format("No instance is found in group %s [type %s]",
name, type));
}
} else {
String msg = String.format("Group %s [type %s] does not allow on-demand termination, set %s=true",
name, type, prop);
LOGGER.info(msg);
throw new FeatureNotEnabledException(msg);
}
}
private void reportEventForSummary(EventTypes eventType, InstanceGroup group, String instanceId) {
context().reportEvent(createEvent(eventType, group, instanceId));
}
/**
* Handle termination error. This has been abstracted so subclasses can decide to continue causing chaos if desired.
*
* @param instance
* the instance
* @param e
* the exception
*/
protected void handleTerminationError(String instance, Throwable e) {
LOGGER.error("failed to terminate instance " + instance, e);
throw new RuntimeException("failed to terminate instance " + instance, e);
}
/** {@inheritDoc} */
@Override
public Event recordTermination(InstanceGroup group, String instance, ChaosType chaosType) {
Event evt = context().recorder().newEvent(Type.CHAOS, EventTypes.CHAOS_TERMINATION, group.region(), instance);
evt.addField("groupType", group.type().name());
evt.addField("groupName", group.name());
evt.addField("chaosType", chaosType.getKey());
context().recorder().recordEvent(evt);
return evt;
}
/** {@inheritDoc} */
@Override
public int getPreviousTerminationCount(InstanceGroup group, Date after) {
Map<String, String> query = new HashMap<String, String>();
query.put("groupType", group.type().name());
query.put("groupName", group.name());
List<Event> evts = context().recorder().findEvents(Type.CHAOS, EventTypes.CHAOS_TERMINATION, query, after);
return evts.size();
}
private Event createEvent(EventTypes chaosTermination, InstanceGroup group, String instance) {
Event evt = context().recorder().newEvent(Type.CHAOS, chaosTermination, group.region(), instance);
evt.addField("groupType", group.type().name());
evt.addField("groupName", group.name());
return evt;
}
/**
* Gets the effective probability value, returns 0 if the group is not enabled. Otherwise calls
* getEffectiveProbability.
* @param group
* @return the effective probability value for the instance group
*/
protected double getEffectiveProbability(InstanceGroup group) {
if (!isGroupEnabled(group)) {
return 0;
}
return getEffectiveProbabilityFromCfg(group);
}
/**
* Gets the effective probability value when the monkey processes an instance group, it uses the following
* logic in the order as listed below.
*
* 1) When minimum mandatory termination is enabled, a default non-zero probability is used for opted-in
* groups, if a) the application has been opted in for the last mandatory termination window
* and b) there was no terminations in the last mandatory termination window
* 2) Use the probability configured for the group type and name
* 3) Use the probability configured for the group
* 4) Use 1.0
* @param group
* @return double
*/
protected double getEffectiveProbabilityFromCfg(InstanceGroup group) {
String propName;
if (cfg.getBool(NS + "mandatoryTermination.enabled")) {
String mtwProp = NS + "mandatoryTermination.windowInDays";
int mandatoryTerminationWindowInDays = (int) cfg.getNumOrElse(mtwProp, 0);
if (mandatoryTerminationWindowInDays > 0
&& noTerminationInLastWindow(group, mandatoryTerminationWindowInDays)) {
double mandatoryProb = cfg.getNumOrElse(NS + "mandatoryTermination.defaultProbability",
DEFAULT_MANDATORY_TERMINATION_PROBABILITY);
LOGGER.info("There has been no terminations for group {} [type {}] in the last {} days,"
+ "setting the probability to {} for mandatory termination.",
new Object[]{group.name(), group.type(), mandatoryTerminationWindowInDays, mandatoryProb});
return mandatoryProb;
}
}
propName = "probability";
double prob = getNumFromCfgOrDefault(group, propName, 1.0);
LOGGER.info("Group {} [type {}] enabled [prob {}]", new Object[]{group.name(), group.type(), prob});
return prob;
}
protected double getNumFromCfgOrDefault(InstanceGroup group, String propName, double defaultValue) {
String defaultProp = String.format("%s%s.%s", NS, group.type(), propName);
String prop = String.format("%s%s.%s.%s", NS, group.type(), group.name(), propName);
return cfg.getNumOrElse(prop, cfg.getNumOrElse(defaultProp, defaultValue));
}
protected boolean getBoolFromCfgOrDefault(InstanceGroup group, String propName, boolean defaultValue) {
String defaultProp = String.format("%s%s.%s", NS, group.type(), propName);
String prop = String.format("%s%s.%s.%s", NS, group.type(), group.name(), propName);
return cfg.getBoolOrElse(prop, cfg.getBoolOrElse(defaultProp, defaultValue));
}
/**
* Returns lastOptInTimeInMilliseconds from the .properties file.
*
* @param group
* @return long
*/
protected long getLastOptInMilliseconds(InstanceGroup group) {
String prop = NS + group.type() + "." + group.name() + ".lastOptInTimeInMilliseconds";
long lastOptInTimeInMilliseconds = (long) cfg.getNumOrElse(prop, -1);
return lastOptInTimeInMilliseconds;
}
private boolean noTerminationInLastWindow(InstanceGroup group, int mandatoryTerminationWindowInDays) {
long lastOptInTimeInMilliseconds = getLastOptInMilliseconds(group);
if (lastOptInTimeInMilliseconds < 0) {
return false;
}
Calendar windowStart = monkeyCalendar.now();
windowStart.add(Calendar.DATE, -1 * mandatoryTerminationWindowInDays);
// return true if the window start is after the last opt-in time and
// there has been no termination since the window start
if (windowStart.getTimeInMillis() > lastOptInTimeInMilliseconds
&& getPreviousTerminationCount(group, windowStart.getTime()) <= 0) {
return true;
}
return false;
}
/**
* Checks to see if the given instance group is enabled.
* @param group
* @return boolean
*/
protected boolean isGroupEnabled(InstanceGroup group) {
boolean enabled = getBoolFromCfgOrDefault(group, "enabled", false);
if (enabled) {
return true;
} else {
String prop = NS + group.type() + "." + group.name() + ".enabled";
String defaultProp = NS + group.type() + ".enabled";
LOGGER.info("Group {} [type {}] disabled, set {}=true or {}=true",
new Object[]{group.name(), group.type(), prop, defaultProp});
return false;
}
}
private boolean isChaosMonkeyEnabled() {
String prop = NS + "enabled";
if (cfg.getBoolOrElse(prop, true)) {
return true;
}
LOGGER.info("ChaosMonkey disabled, set {}=true", prop);
return false;
}
private InstanceGroup findInstanceGroup(String type, String name) {
// Calling context().chaosCrawler().groups(name) causes a new crawl to get
// the up to date information for the group name.
for (InstanceGroup group : context().chaosCrawler().groups(name)) {
if (group.type().toString().equals(type) && group.name().equals(name)) {
return group;
}
}
LOGGER.warn("Failed to find instance group for type {} and name {}", type, name);
return null;
}
protected Event terminateInstance(InstanceGroup group, String inst, ChaosType chaosType) {
Validate.notNull(group);
Validate.notEmpty(inst);
String prop = NS + "leashed";
if (cfg.getBoolOrElse(prop, true)) {
LOGGER.info("leashed ChaosMonkey prevented from killing {} from group {} [{}], set {}=false",
new Object[]{inst, group.name(), group.type(), prop});
reportEventForSummary(EventTypes.CHAOS_TERMINATION_SKIPPED, group, inst);
return null;
} else {
try {
Event evt = recordTermination(group, inst, chaosType);
sendTerminationNotification(group, inst, chaosType);
SshConfig sshConfig = new SshConfig(cfg);
ChaosInstance chaosInstance = new ChaosInstance(context().cloudClient(), inst, sshConfig);
chaosType.apply(chaosInstance);
LOGGER.info("Terminated {} from group {} [{}] with {}",
new Object[]{inst, group.name(), group.type(), chaosType.getKey() });
reportEventForSummary(EventTypes.CHAOS_TERMINATION, group, inst);
return evt;
} catch (NotFoundException e) {
LOGGER.warn("Failed to terminate " + inst + ", it does not exist. Perhaps it was already terminated");
reportEventForSummary(EventTypes.CHAOS_TERMINATION_SKIPPED, group, inst);
return null;
} catch (Exception e) {
handleTerminationError(inst, e);
reportEventForSummary(EventTypes.CHAOS_TERMINATION_SKIPPED, group, inst);
return null;
}
}
}
/**
* Checks to see if the maximum termination window has been exceeded.
*
* @param group
* @return boolean
*/
protected boolean isMaxTerminationCountExceeded(InstanceGroup group) {
Validate.notNull(group);
String propName = "maxTerminationsPerDay";
double maxTerminationsPerDay = getNumFromCfgOrDefault(group, propName, 1.0);
if (maxTerminationsPerDay <= MIN_MAX_TERMINATION_COUNT_PER_DAY) {
String prop = String.format("%s%s.%s.%s", NS, group.type(), group.name(), propName);
LOGGER.info("ChaosMonkey is configured to not allow any killing from group {} [{}] "
+ "with max daily count set as {}", new Object[]{group.name(), group.type(), prop});
return true;
} else {
int daysBack = 1;
int maxCount = (int) maxTerminationsPerDay;
if (maxTerminationsPerDay < 1.0) {
daysBack = (int) Math.ceil(1 / maxTerminationsPerDay);
maxCount = 1;
}
Calendar after = monkeyCalendar.now();
after.add(Calendar.DATE, -1 * daysBack);
// Check if the group has exceeded the maximum terminations for the last period
int terminationCount = getPreviousTerminationCount(group, after.getTime());
if (terminationCount >= maxCount) {
LOGGER.info("The count of terminations for group {} [{}] in the last {} days is {},"
+ " equal or greater than the max count threshold {}",
new Object[]{group.name(), group.type(), daysBack, terminationCount, maxCount});
return true;
}
}
return false;
}
@Override
public void sendTerminationNotification(InstanceGroup group, String instance, ChaosType chaosType) {
String propEmailGlobalEnabled = "simianarmy.chaos.notification.global.enabled";
String propEmailGroupEnabled = String.format("%s%s.%s.notification.enabled", NS, group.type(), group.name());
ChaosEmailNotifier notifier = context().chaosEmailNotifier();
if (notifier == null) {
String msg = "Chaos email notifier is not set.";
LOGGER.error(msg);
throw new RuntimeException(msg);
}
if (cfg.getBoolOrElse(propEmailGroupEnabled, false)) {
notifier.sendTerminationNotification(group, instance, chaosType);
}
if (cfg.getBoolOrElse(propEmailGlobalEnabled, false)) {
notifier.sendTerminationGlobalNotification(group, instance, chaosType);
}
}
/**
* {@inheritDoc}
*/
@Override
public List<ChaosType> getChaosTypes() {
return Lists.newArrayList(allChaosTypes);
}
}
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
/**
 * Exception thrown when a janitor's dry-run cleanup pass fails.
 *
 * <p>Raised by {@code DryRunnableJanitor#cleanupDryRun} implementations so that
 * callers can distinguish dry-run failures from real cleanup failures.
 */
public class DryRunnableJanitorException extends Exception {
    /** Serial version UID; an Exception subclass is Serializable and should declare one. */
    private static final long serialVersionUID = 1L;

    /**
     * Constructor.
     *
     * @param message the detail message
     */
    public DryRunnableJanitorException(String message) {
        super(message);
    }

    /**
     * Constructor.
     *
     * @param message the detail message
     * @param cause the underlying cause of the failure
     */
    public DryRunnableJanitorException(String message, Throwable cause) {
        super(message, cause);
    }
}
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.Resource.CleanupState;
import com.netflix.simianarmy.aws.AWSEmailNotifier;
import org.apache.commons.lang.Validate;
import org.apache.commons.lang.StringUtils;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/** The email notifier implemented for Janitor Monkey. */
public class JanitorEmailNotifier extends AWSEmailNotifier {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(JanitorEmailNotifier.class);

    /** Placeholder owner key used when a resource has no owner email at all. */
    private static final String UNKNOWN_EMAIL = "UNKNOWN";

    /**
     * If the scheduled termination date is within 2 hours of notification date + headsup days,
     * we don't need to extend the termination date.
     */
    private static final int HOURS_IN_MARGIN = 2;

    private final String region;
    private final String defaultEmail;
    private final List<String> ccEmails;
    private final JanitorResourceTracker resourceTracker;
    private final JanitorEmailBuilder emailBuilder;
    private final MonkeyCalendar calendar;
    private final int daysBeforeTermination;
    private final String sourceEmail;
    private final String ownerEmailDomain;

    /** Resources whose owner email was invalid, keyed by that invalid address. */
    private final Map<String, Collection<Resource>> invalidEmailToResources =
            new HashMap<String, Collection<Resource>>();

    /**
     * The Interface Context.
     */
    public interface Context {
        /**
         * Gets the Amazon Simple Email Service client.
         * @return the Amazon Simple Email Service client
         */
        AmazonSimpleEmailServiceClient sesClient();

        /**
         * Gets the source email the notifier uses to send email.
         * @return the source email
         */
        String sourceEmail();

        /**
         * Gets the default email the notifier sends to when there is no owner specified for a resource.
         * @return the default email
         */
        String defaultEmail();

        /**
         * Gets the number of days a notification is sent before the expected termination date.
         * @return the number of days a notification is sent before the expected termination date.
         */
        int daysBeforeTermination();

        /**
         * Gets the region the notifier is running in.
         * @return the region the notifier is running in.
         */
        String region();

        /** Gets the janitor resource tracker.
         * @return the janitor resource tracker
         */
        JanitorResourceTracker resourceTracker();

        /** Gets the janitor email builder.
         * @return the janitor email builder
         */
        JanitorEmailBuilder emailBuilder();

        /** Gets the calendar.
         * @return the calendar
         */
        MonkeyCalendar calendar();

        /** Gets the cc email addresses.
         * @return the cc email addresses
         */
        String[] ccEmails();

        /** Get the default domain of email addresses.
         * @return the default domain of email addresses
         */
        String ownerEmailDomain();
    }

    /**
     * Constructor.
     * @param ctx the context.
     */
    public JanitorEmailNotifier(Context ctx) {
        super(ctx.sesClient());
        this.region = ctx.region();
        this.defaultEmail = ctx.defaultEmail();
        this.daysBeforeTermination = ctx.daysBeforeTermination();
        this.resourceTracker = ctx.resourceTracker();
        this.emailBuilder = ctx.emailBuilder();
        this.calendar = ctx.calendar();
        this.ccEmails = new ArrayList<String>();
        String[] ctxCCs = ctx.ccEmails();
        if (ctxCCs != null) {
            // Bulk copy instead of the original element-by-element loop.
            Collections.addAll(this.ccEmails, ctxCCs);
        }
        this.sourceEmail = ctx.sourceEmail();
        this.ownerEmailDomain = ctx.ownerEmailDomain();
    }

    /**
     * Gets all the resources that are marked and no notifications have been sent. Send email notifications
     * for these resources. If there is a valid email address in the ownerEmail field of the resource, send
     * to that address. Otherwise send to the default email address.
     */
    public void sendNotifications() {
        validateEmails();
        Map<String, Collection<Resource>> emailToResources = new HashMap<String, Collection<Resource>>();
        invalidEmailToResources.clear();
        for (Resource r : getMarkedResources()) {
            if (r.isOptOutOfJanitor()) {
                LOGGER.info(String.format("Resource %s is opted out of Janitor Monkey so no notification is sent.",
                        r.getId()));
                continue;
            }
            if (canNotify(r)) {
                String email = r.getOwnerEmail();
                // A bare user name (no '@') is qualified with the configured default domain.
                if (email != null && !email.contains("@")
                        && StringUtils.isNotBlank(this.ownerEmailDomain)) {
                    email = String.format("%s@%s", email, this.ownerEmailDomain);
                }
                if (!isValidEmail(email)) {
                    if (defaultEmail != null) {
                        LOGGER.info(String.format("Email %s is not valid, send to the default email address %s",
                                email, defaultEmail));
                        putEmailAndResource(emailToResources, defaultEmail, r);
                    } else {
                        if (email == null) {
                            email = UNKNOWN_EMAIL;
                        }
                        LOGGER.info(String.format("Email %s is not valid and default email is not set for resource %s",
                                email, r.getId()));
                        putEmailAndResource(invalidEmailToResources, email, r);
                    }
                } else {
                    putEmailAndResource(emailToResources, email, r);
                }
            } else {
                LOGGER.debug(String.format("Not the time to send notification for resource %s", r.getId()));
            }
        }
        emailBuilder.setEmailToResources(emailToResources);
        Date now = calendar.now().getTime();
        for (Map.Entry<String, Collection<Resource>> entry : emailToResources.entrySet()) {
            String email = entry.getKey();
            String emailBody = emailBuilder.buildEmailBody(email);
            String subject = buildEmailSubject(email);
            sendEmail(email, subject, emailBody);
            // Record the notification time so the same resources are not re-notified.
            for (Resource r : entry.getValue()) {
                LOGGER.debug(String.format("Notification is sent for resource %s", r.getId()));
                r.setNotificationTime(now);
                resourceTracker.addOrUpdate(r);
            }
            LOGGER.info(String.format("Email notification has been sent to %s for %d resources.",
                    email, entry.getValue().size()));
        }
    }

    /**
     * Gets the marked resources for notification. Allow overriding in subclasses.
     * @return the marked resources
     */
    protected Collection<Resource> getMarkedResources() {
        return resourceTracker.getResources(null, CleanupState.MARKED, region);
    }

    private void validateEmails() {
        if (defaultEmail != null) {
            Validate.isTrue(isValidEmail(defaultEmail), String.format("Default email %s is invalid", defaultEmail));
        }
        // ccEmails is initialized in the constructor and can never be null, so the
        // original "ccEmails != null" guard was dead code; only entries need checking.
        for (String ccEmail : ccEmails) {
            Validate.isTrue(isValidEmail(ccEmail), String.format("CC email %s is invalid", ccEmail));
        }
    }

    @Override
    public String buildEmailSubject(String email) {
        return String.format("Janitor Monkey Notification for %s", email);
    }

    /**
     * Decides if it is time for sending notification for the resource. This method can be
     * overridden in subclasses so notifications can be send earlier or later.
     * @param resource the resource
     * @return true if it is OK to send notification now, otherwise false.
     */
    protected boolean canNotify(Resource resource) {
        Validate.notNull(resource);
        if (resource.getState() != CleanupState.MARKED || resource.isOptOutOfJanitor()) {
            return false;
        }
        Date notificationTime = resource.getNotificationTime();
        // We don't want to send notification too early (since things may change) or too late (we need
        // to give owners enough time to take actions).
        Date windowStart = new Date(new DateTime(
                calendar.getBusinessDay(calendar.now().getTime(), daysBeforeTermination).getTime())
                .minusHours(HOURS_IN_MARGIN).getMillis());
        Date windowEnd = calendar.getBusinessDay(calendar.now().getTime(), daysBeforeTermination + 1);
        Date terminationDate = resource.getExpectedTerminationTime();
        if (notificationTime == null
                || notificationTime.getTime() == 0
                || resource.getMarkTime().after(notificationTime)) { // remarked after a notification
            if (!terminationDate.before(windowStart) && !terminationDate.after(windowEnd)) {
                // The expected termination time is close enough for sending notification
                return true;
            } else if (terminationDate.before(windowStart)) {
                // The expected termination date is too close. To give the owner time to take possible actions,
                // we extend the expected termination time here.
                LOGGER.info(String.format("It is less than %d days before the expected termination date,"
                        + " of resource %s, extending the termination time to %s.",
                        daysBeforeTermination, resource.getId(), windowStart));
                resource.setExpectedTerminationTime(windowStart);
                resourceTracker.addOrUpdate(resource);
                return true;
            } else {
                return false;
            }
        }
        return false;
    }

    /**
     * Gets the map from invalid email address to the resources that were supposed to be sent to the address.
     *
     * @return the map from invalid address to resources that failed to be delivered
     */
    public Map<String, Collection<Resource>> getInvalidEmailToResources() {
        return Collections.unmodifiableMap(invalidEmailToResources);
    }

    @Override
    public String[] getCcAddresses(String to) {
        return ccEmails.toArray(new String[ccEmails.size()]);
    }

    @Override
    public String getSourceAddress(String to) {
        return sourceEmail;
    }

    private void putEmailAndResource(
            Map<String, Collection<Resource>> map, String email, Resource resource) {
        // Lazily create the per-email bucket on first use instead of the
        // hand-rolled get/null-check/put sequence.
        map.computeIfAbsent(email, k -> new ArrayList<Resource>()).add(resource);
    }
}
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import java.util.Collection;
import java.util.Map;
import com.netflix.simianarmy.AbstractEmailBuilder;
import com.netflix.simianarmy.Resource;
/** The abstract class for building Janitor monkey email notifications. */
public abstract class JanitorEmailBuilder extends AbstractEmailBuilder {
    /**
     * Sets the map from an owner email to the resources that belong to the owner
     * and need to send notifications for.
     *
     * <p>Implementations use this map when rendering the per-owner email body.
     *
     * @param emailToResources the map from owner email to the owned resource
     */
    public abstract void setEmailToResources(Map<String, Collection<Resource>> emailToResources);
}
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.netflix.simianarmy.Resource;
/**
 * A {@code Janitor} that can additionally simulate a cleanup without actually
 * deleting anything (a "dry run").
 */
public interface DryRunnableJanitor extends Janitor {
    /**
     * Simulates cleaning up the given marked resource. The default
     * implementation does nothing.
     *
     * @param markedResource the resource that would be cleaned up
     * @throws DryRunnableJanitorException if the simulated cleanup fails
     */
    default void cleanupDryRun(Resource markedResource) throws DryRunnableJanitorException {
        // NO-OP
    }
}
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.MonkeyType;
import java.util.List;
/**
 * The abstract class for a Janitor Monkey.
 */
public abstract class JanitorMonkey extends Monkey {
    /** The key name of the Janitor tag used to tag resources. */
    public static final String JANITOR_TAG = "janitor";

    /** The key name of the Janitor meta tag used to tag resources. */
    public static final String JANITOR_META_TAG = "JANITOR_META";

    /** The key name of the tag instance used to tag resources. */
    public static final String INSTANCE_TAG_KEY = "instance";

    /** The key name of the tag detach time used to tag resources. */
    public static final String DETACH_TIME_TAG_KEY = "detachTime";

    /**
     * The Interface Context.
     */
    public interface Context extends Monkey.Context {
        /**
         * Configuration.
         *
         * @return the monkey configuration
         */
        MonkeyConfiguration configuration();

        /**
         * Janitors run by this monkey.
         * @return the janitors
         */
        List<AbstractJanitor> janitors();

        /**
         * Email notifier used to send notifications by the janitor monkey.
         * @return the email notifier
         */
        JanitorEmailNotifier emailNotifier();

        /**
         * The region the monkey is running in.
         * @return the region the monkey is running in.
         */
        String region();

        /**
         * The accountName the monkey is running in.
         * @return the accountName the monkey is running in.
         */
        String accountName();

        /**
         * The Janitor resource tracker.
         * @return the Janitor resource tracker.
         */
        JanitorResourceTracker resourceTracker();
    }

    /** The context. */
    private final Context ctx;

    /**
     * Instantiates a new janitor monkey.
     *
     * @param ctx
     *            the context.
     */
    public JanitorMonkey(Context ctx) {
        super(ctx);
        this.ctx = ctx;
    }

    /**
     * The monkey Type.
     */
    // Note: nested enums are implicitly static; the redundant "static" modifier was removed.
    public enum Type implements MonkeyType {
        /** janitor monkey. */
        JANITOR
    }

    /**
     * The event types that this monkey causes.
     */
    public enum EventTypes implements EventType {
        /** Marking a resource as a cleanup candidate. */
        MARK_RESOURCE,
        /** Un-Marking a resource. */
        UNMARK_RESOURCE,
        /** Clean up a resource. */
        CLEANUP_RESOURCE,
        /** Opt in a resource. */
        OPT_IN_RESOURCE,
        /** Opt out a resource. */
        OPT_OUT_RESOURCE
    }

    /** {@inheritDoc} */
    @Override
    public final Type type() {
        return Type.JANITOR;
    }

    /** {@inheritDoc} */
    @Override
    public Context context() {
        return ctx;
    }

    /** {@inheritDoc} */
    @Override
    public abstract void doMonkeyBusiness();

    /**
     * Opt in a resource for Janitor Monkey.
     * @param resourceId the resource id
     * @return the opt-in event
     */
    public abstract Event optInResource(String resourceId);

    /**
     * Opt out a resource for Janitor Monkey.
     * @param resourceId the resource id
     * @return the opt-out event
     */
    public abstract Event optOutResource(String resourceId);

    /**
     * Opt in a resource for Janitor Monkey.
     * @param resourceId the resource id
     * @param region the region of the resource
     * @return the opt-in event
     */
    public abstract Event optInResource(String resourceId, String region);

    /**
     * Opt out a resource for Janitor Monkey.
     * @param resourceId the resource id
     * @param region the region of the resource
     * @return the opt-out event
     */
    public abstract Event optOutResource(String resourceId, String region);
}
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import java.util.*;
import com.google.common.collect.Maps;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.annotations.MonitorTags;
import com.netflix.servo.monitor.BasicCounter;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.tag.BasicTagList;
import com.netflix.servo.tag.TagList;
import com.netflix.simianarmy.*;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.Resource.CleanupState;
import com.netflix.simianarmy.janitor.JanitorMonkey.EventTypes;
import com.netflix.simianarmy.janitor.JanitorMonkey.Type;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An abstract implementation of Janitor. It marks resources that the rule engine considers
* invalid as cleanup candidate and sets the expected termination date. It also removes the
* cleanup candidate flag from resources that no longer exist or the rule engine no longer
* considers invalid due to change of conditions. For resources marked as cleanup candidates
* and the expected termination date is passed, the janitor removes the resources from the
* cloud.
*/
public abstract class AbstractJanitor implements Janitor, DryRunnableJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractJanitor.class);
    /** Tags to attach to servo metrics. */
    @MonitorTags
    protected TagList tags;
    /** The region this janitor operates in; set once from the context in the constructor. */
    private final String region;
/** The region the janitor is running in. */
public String getRegion() {
return region;
}
    /**
     * The rule engine used to decide if a resource should be a cleanup
     * candidate.
     */
    private final JanitorRuleEngine ruleEngine;
    /** The janitor crawler to get resources from the cloud. */
    private final JanitorCrawler crawler;
    /** The resource type that the janitor is responsible for to clean up. **/
    private final ResourceType resourceType;
    /** The janitor resource tracker that is responsible for keeping track of
     * resource status.
     */
    private final JanitorResourceTracker resourceTracker;
    // Per-run result lists: each is cleared at the start of the corresponding
    // phase and exposed read-only through the getters below.
    private final Collection<Resource> markedResources = new ArrayList<Resource>();
    private final Collection<Resource> cleanedResources = new ArrayList<Resource>();
    private final Collection<Resource> unmarkedResources = new ArrayList<Resource>();
    private final Collection<Resource> failedToCleanResources = new ArrayList<Resource>();
    private final Collection<Resource> skippedVanishedOrValidResources = new ArrayList<>();
    /** Calendar abstraction used for all time calculations. */
    private final MonkeyCalendar calendar;
    /** Monkey configuration used to read janitor properties. */
    private final MonkeyConfiguration config;
    /** Flag to indicate whether the Janitor is leashed. */
    private boolean leashed;
    /** Event recorder; may be null, in which case no events are recorded. */
    private final MonkeyRecorder recorder;
    /** The number of resources that have been checked on this run. */
    private int checkedResourcesCount;
    /** Servo counter tracking failures of the dry-run cleanup path. */
    private Counter cleanupDryRunFailureCount = new BasicCounter(MonitorConfig.builder("dryRunCleanupFailures").build());
    /**
     * Sets the flag to indicate if the janitor is leashed.
     *
     * @param isLeashed true if the janitor is leashed, false otherwise.
     */
    protected void setLeashed(boolean isLeashed) {
        this.leashed = isLeashed;
    }
    /**
     * The Interface Context.
     */
    public interface Context {
        /** Region.
         *
         * @return the region
         */
        String region();
        /**
         * Configuration.
         *
         * @return the monkey configuration
         */
        MonkeyConfiguration configuration();
        /**
         * Calendar.
         *
         * @return the monkey calendar
         */
        MonkeyCalendar calendar();
        /**
         * Janitor rule engine.
         * @return the janitor rule engine
         */
        JanitorRuleEngine janitorRuleEngine();
        /**
         * Janitor crawler.
         *
         * @return the janitor crawler
         */
        JanitorCrawler janitorCrawler();
        /**
         * Janitor resource tracker.
         *
         * @return the janitor resource tracker
         */
        JanitorResourceTracker janitorResourceTracker();
        /**
         * Recorder.
         *
         * @return the recorder to record events; may be null to disable event recording
         */
        MonkeyRecorder recorder();
    }
/**
* Constructor.
* @param ctx the context
* @param resourceType the resource type the janitor is taking care
*/
public AbstractJanitor(Context ctx, ResourceType resourceType) {
Validate.notNull(ctx);
Validate.notNull(resourceType);
this.region = ctx.region();
Validate.notNull(region);
this.ruleEngine = ctx.janitorRuleEngine();
Validate.notNull(ruleEngine);
this.crawler = ctx.janitorCrawler();
Validate.notNull(crawler);
this.resourceTracker = ctx.janitorResourceTracker();
Validate.notNull(resourceTracker);
this.calendar = ctx.calendar();
Validate.notNull(calendar);
this.config = ctx.configuration();
Validate.notNull(config);
// By default the janitor is leashed.
this.leashed = config.getBoolOrElse("simianarmy.janitor.leashed", true);
this.resourceType = resourceType;
Validate.notNull(resourceType);
// recorder could be null and no events are recorded when it is.
this.recorder = ctx.recorder();
// setup servo tags, currently just tag each published metric with the region
this.tags = BasicTagList.of("simianarmy.janitor.region", ctx.region());
// register this janitor with servo
String monitorObjName = String.format("simianarmy.janitor.%s.%s", this.resourceType.name(), this.region);
Monitors.registerObject(monitorObjName, this);
}
@Override
public ResourceType getResourceType() {
return resourceType;
}
/**
* Clears this object's internal resource lists in preparation for a new
* run.
*
* This is an optional method as regular Janitor processing will
* automatically clear resource lists as it runs.
*
* This method offers an explicit clear so that the resources will be
* consistent across the run. For example, when starting a run after a
* previous run has finished, cleanedResources will be holding the cleaned
* resources from the prior run until cleanupResources() is called. By
* calling prepareToRun() first, the resource lists will be consistent
* for the entire run.
*/
public void prepareToRun() {
markedResources.clear();
unmarkedResources.clear();
checkedResourcesCount = 0;
cleanedResources.clear();
failedToCleanResources.clear();
}
    /**
     * Marks all resources obtained from the crawler as cleanup candidate if
     * the janitor rule engine thinks so.
     *
     * <p>Also unmarks previously-marked resources that the rule engine now
     * considers valid, and (optionally) those no longer returned by the crawler.
     * Can be disabled entirely via "simianarmy.janitor.skipMark".
     */
    @Override
    public void markResources() {
        if (config.getBoolOrElse("simianarmy.janitor.skipMark", false)) {
            LOGGER.info("*****SKIPPING MARKING {}****", resourceType);
            return ;
        }
        markedResources.clear();
        unmarkedResources.clear();
        checkedResourcesCount = 0;
        Map<String, Resource> trackedMarkedResources = getTrackedMarkedResources();
        List<Resource> crawledResources = crawler.resources(resourceType);
        LOGGER.info("Looking for cleanup candidate in {} crawled resources. LeashMode={}", crawledResources.size(), leashed);
        Date now = calendar.now().getTime();
        for (Resource resource : crawledResources) {
            checkedResourcesCount++;
            Resource trackedResource = trackedMarkedResources.get(resource.getId());
            if (!ruleEngine.isValid(resource)) {
                // If the resource is already marked, ignore it
                if (trackedResource != null) {
                    LOGGER.debug("Resource {} is already marked. LeashMode={}", resource.getId(), leashed);
                    continue;
                }
                LOGGER.info("Marking resource {} of type {} with expected termination time as {} LeashMode={}"
                        , resource.getId(), resource.getResourceType(), resource.getExpectedTerminationTime(), leashed);
                resource.setState(CleanupState.MARKED);
                resource.setMarkTime(now);
                resourceTracker.addOrUpdate(resource);
                // Events are only recorded when the janitor is unleashed and a recorder exists.
                if (!leashed && recorder != null) {
                    Event evt = recorder.newEvent(Type.JANITOR, EventTypes.MARK_RESOURCE, resource, resource.getId());
                    recorder.recordEvent(evt);
                }
                postMark(resource);
                markedResources.add(resource);
            } else if (trackedResource != null) {
                // The resource was marked and now the rule engine does not consider it as a cleanup candidate.
                // So the janitor needs to unmark the resource.
                LOGGER.info("Unmarking resource {} LeashMode={}", resource.getId(), leashed);
                resource.setState(CleanupState.UNMARKED);
                resourceTracker.addOrUpdate(resource);
                if (!leashed && recorder != null) {
                    Event evt = recorder.newEvent(
                            Type.JANITOR, EventTypes.UNMARK_RESOURCE, resource, resource.getId());
                    recorder.recordEvent(evt);
                }
                unmarkedResources.add(resource);
            }
        }
        // Unmark the resources that are terminated by user so not returned by the crawler.
        unmarkUserTerminatedResources(crawledResources, trackedMarkedResources);
    }
/**
* Gets the existing resources that are marked as cleanup candidate. Allowing the subclass to override for e.g.
* to handle multi-region.
* @return the map from resource id to marked resource
*/
protected Map<String, Resource> getTrackedMarkedResources() {
Map<String, Resource> trackedMarkedResources = Maps.newHashMap();
for (Resource resource : resourceTracker.getResources(resourceType, Resource.CleanupState.MARKED, region)) {
trackedMarkedResources.put(resource.getId(), resource);
}
return trackedMarkedResources;
}
    /**
     * Cleans up all cleanup candidates that are OK to remove.
     *
     * <p>When the janitor is leashed, only a dry-run cleanup is performed; the
     * resource state is not changed and no events are recorded. Failures are
     * collected rather than aborting the run.
     */
    @Override
    public void cleanupResources() {
        cleanedResources.clear();
        failedToCleanResources.clear();
        skippedVanishedOrValidResources.clear();
        Map<String, Resource> trackedMarkedResources = getTrackedMarkedResources();
        LOGGER.info("Checking {} marked resources for cleanup. LeashMode={}", trackedMarkedResources.size(), leashed);
        Date now = calendar.now().getTime();
        for (Resource markedResource : trackedMarkedResources.values()) {
            if (config.getBoolOrElse("simianarmy.janitor.skipVanishedOrValidResources", false)) {
                // find matching crawled resource. This ensures we always have the freshest resource.
                List<Resource> matchingCrawledResources = Optional.ofNullable(crawler.resources(markedResource.getId()))
                        .orElse(Collections.emptyList());
                LOGGER.info("Rechecking resource {} before deletion {} - matching candidates {}",
                        markedResource, markedResource.getResourceType(), matchingCrawledResources);
                Optional<Resource> crawledResource = matchingCrawledResources.stream()
                        .filter(r -> r.equals(markedResource))
                        .findFirst();
                // Skip resources that vanished (already terminated) or became valid again.
                if (!crawledResource.isPresent() || ruleEngine.isValid(crawledResource.get())) {
                    skippedVanishedOrValidResources.add(markedResource);
                    LOGGER.warn("Skipping resource {} that either no longer exists or is now valid", markedResource);
                    continue;
                }
            }
            if (canClean(markedResource, now)) {
                LOGGER.info("Cleaning up resource {} of type {}. LeashMode={}",
                        markedResource.getId(), markedResource.getResourceType().name(), leashed);
                try {
                    if (leashed) {
                        // Dry run operates on a clone so the tracked resource is not mutated.
                        cleanupDryRun(markedResource.cloneResource());
                    } else {
                        cleanup(markedResource);
                        markedResource.setActualTerminationTime(now);
                        markedResource.setState(Resource.CleanupState.JANITOR_TERMINATED);
                        resourceTracker.addOrUpdate(markedResource);
                        if (recorder != null) {
                            Event evt = recorder.newEvent(Type.JANITOR, EventTypes.CLEANUP_RESOURCE, markedResource,
                                    markedResource.getId());
                            recorder.recordEvent(evt);
                        }
                        postCleanup(markedResource);
                        cleanedResources.add(markedResource);
                    }
                } catch (Exception e) {
                    String message;
                    // Dry-run failures are counted separately and do not count as real cleanup failures.
                    if (e instanceof DryRunnableJanitorException) {
                        message = String.format("Failed Dry Run cleanup of resource %s of type %s. LeashMode=%b",
                                markedResource.getId(), markedResource.getResourceType().name(), leashed);
                        cleanupDryRunFailureCount.increment();
                    } else {
                        message = String.format("Failed to clean up the resource %s of type %s. LeashMode=%b",
                                markedResource.getId(), markedResource.getResourceType().name(), leashed);
                        failedToCleanResources.add(markedResource);
                    }
                    LOGGER.error(message, e);
                }
            }
        }
    }
/** Determines if the input resource can be cleaned. The Janitor calls this method
* before cleaning up a resource and only cleans the resource when the method returns
* true. A resource is considered to be OK to clean if
* 1) it is marked as cleanup candidates
* 2) the expected termination time is already passed
* 3) the owner has already been notified about the cleanup
* 4) the resource is not opted out of Janitor monkey
* The method can be overridden in subclasses.
* @param resource the resource the Janitor considers to clean
* @param now the time that represents the current time
* @return true if the resource is OK to clean, false otherwise
*/
protected boolean canClean(Resource resource, Date now) {
return resource.getState() == Resource.CleanupState.MARKED
&& !resource.isOptOutOfJanitor()
&& resource.getExpectedTerminationTime() != null
&& resource.getExpectedTerminationTime().before(now)
&& resource.getNotificationTime() != null
&& resource.getNotificationTime().before(now);
}
    /**
     * Implements required operations after a resource is marked
     * (e.g. tagging the resource in the cloud).
     * @param resource The resource that is marked
     */
    protected abstract void postMark(Resource resource);
    /**
     * Cleans a resource up, e.g. deleting the resource from the cloud.
     * Only invoked when the janitor is unleashed.
     * @param resource The resource that is cleaned up.
     */
    protected abstract void cleanup(Resource resource);
    /**
     * Implements required operations after a resource is cleaned.
     * @param resource The resource that is cleaned up.
     */
    protected abstract void postCleanup(Resource resource);
    /** Gets the resources marked in the last run of the Janitor (unmodifiable view). */
    public Collection<Resource> getMarkedResources() {
        return Collections.unmodifiableCollection(markedResources);
    }
    /** Gets the resources unmarked in the last run of the Janitor (unmodifiable view). */
    public Collection<Resource> getUnmarkedResources() {
        return Collections.unmodifiableCollection(unmarkedResources);
    }
    /** Gets the resources cleaned in the last run of the Janitor (unmodifiable view). */
    public Collection<Resource> getCleanedResources() {
        return Collections.unmodifiableCollection(cleanedResources);
    }
    /** Gets the resources that failed to be cleaned in the last run of the Janitor (unmodifiable view). */
    public Collection<Resource> getFailedToCleanResources() {
        return Collections.unmodifiableCollection(failedToCleanResources);
    }
private void unmarkUserTerminatedResources(List<Resource> crawledResources, Map<String, Resource> trackedMarkedResources) {
Set<String> crawledResourceIds = new HashSet<String>();
for (Resource crawledResource : crawledResources) {
crawledResourceIds.add(crawledResource.getId());
}
if (config.getBoolOrElse("simianarmy.janitor.unmarkResourceNotReturnedByCrawler", false)) {
for (Resource markedResource : trackedMarkedResources.values()) {
if (!crawledResourceIds.contains(markedResource.getId())) {
// The resource does not exist anymore.
LOGGER.info("Resource {} is not returned by the crawler. It should already be terminated. LeashMode={}",
markedResource.getId(), leashed);
markedResource.setState(Resource.CleanupState.USER_TERMINATED);
resourceTracker.addOrUpdate(markedResource);
unmarkedResources.add(markedResource);
}
}
}
}
    /** Number of resources cleaned in the last Janitor run, exported as a Servo gauge. */
    @Monitor(name="cleanedResourcesCount", type=DataSourceType.GAUGE)
    public int getResourcesCleanedCount() {
        return cleanedResources.size();
    }
    /** Number of resources marked in the last Janitor run, exported as a Servo gauge. */
    @Monitor(name="markedResourcesCount", type=DataSourceType.GAUGE)
    public int getMarkedResourcesCount() {
        return markedResources.size();
    }
    /** Number of resources that failed to be cleaned in the last Janitor run, exported as a Servo gauge. */
    @Monitor(name="failedToCleanResourcesCount", type=DataSourceType.GAUGE)
    public int getFailedToCleanResourcesCount() {
        return failedToCleanResources.size();
    }
    /** Number of resources unmarked in the last Janitor run, exported as a Servo gauge. */
    @Monitor(name="unmarkedResourcesCount", type=DataSourceType.GAUGE)
    public int getUnmarkedResourcesCount() {
        return unmarkedResources.size();
    }
    /** Number of resources checked in the last Janitor run, exported as a Servo gauge. */
    @Monitor(name="checkedResourcesCount", type=DataSourceType.GAUGE)
    public int getCheckedResourcesCount() {
        return checkedResourcesCount;
    }
    /** Number of resources skipped because they vanished or became valid again, exported as a Servo gauge. */
    @Monitor(name="skippedVanishedOrValidResources", type = DataSourceType.GAUGE)
    public int skippedVanishedOrValidResources() {
        return skippedVanishedOrValidResources.size();
    }
    /**
     * Gets the counter of cleanup failures encountered during dry runs.
     * @return the dry-run cleanup failure counter
     */
    public Counter getCleanupDryRunFailureCount() {
        return cleanupDryRunFailureCount;
    }
}
| 4,814 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/janitor/JanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import java.util.EnumSet;
import java.util.List;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
/**
 * The crawler interface for Janitor monkey. A crawler discovers cloud resources
 * of one or more resource types so the Janitor can evaluate them for cleanup.
 */
public interface JanitorCrawler {
    /**
     * Resource types.
     *
     * @return the set of resource types this crawler crawls
     */
    EnumSet<? extends ResourceType> resourceTypes();
    /**
     * Resources crawled by this crawler for a specific resource type.
     *
     * @param resourceType the resource type
     * @return the list of resources of the given type
     */
    List<Resource> resources(ResourceType resourceType);
    /**
     * Gets the up-to-date information for a collection of resource ids. When the input argument is null
     * or empty, the method returns all resources.
     *
     * @param resourceIds
     *            the resource ids
     * @return the list of resources
     */
    List<Resource> resources(String... resourceIds);
    /**
     * Gets the owner email for a resource; used to populate the resource's ownerEmail
     * field during crawling.
     * @param resource the resource
     * @return the owner email of the resource
     */
    String getOwnerEmailForResource(Resource resource);
}
| 4,815 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/janitor/Janitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.netflix.simianarmy.ResourceType;
/**
 * The interface for a janitor that performs the mark and cleanup operations for
 * cloud resources of a single resource type.
 */
public interface Janitor {
    /**
     * Gets the resource type the janitor is cleaning up.
     * @return the resource type the janitor is cleaning up.
     */
    ResourceType getResourceType();
    /**
     * Marks cloud resources as cleanup candidates and removes the marks for resources
     * that no longer exist or should no longer be cleanup candidates.
     */
    void markResources();
    /**
     * Cleans up the resources that are marked as cleanup candidates, when appropriate.
     */
    void cleanupResources();
}
| 4,816 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/janitor/JanitorResourceTracker.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import java.util.List;
/**
 * The interface to track the resources marked/cleaned by the Janitor Monkey.
 */
public interface JanitorResourceTracker {
    /**
     * Adds a resource to the tracker. If a resource with the same id already exists
     * in the tracker, the method updates the existing record with the resource parameter.
     * @param resource the resource to add or update
     */
    void addOrUpdate(Resource resource);
    /** Gets the list of resources of a specific resource type and cleanup state in a region.
     *
     * @param resourceType the resource type
     * @param state the cleanup state of the resources
     * @param region the region of the resources; when the parameter is null, the method returns
     * resources from all regions
     * @return the list of resources that match the resource type, state and region
     */
    List<Resource> getResources(ResourceType resourceType, Resource.CleanupState state, String region);
    /** Gets the resource with a specific id.
     *
     * @param resourceId the resource id
     * @return the resource that matches the resource id
     */
    Resource getResource(String resourceId);
    /** Gets the resource with a specific id in a specific region.
     *
     * @param resourceId the resource id
     * @param regionId the region id
     * @return the resource that matches the resource id and region
     */
    Resource getResource(String resourceId, String regionId);
}
| 4,817 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/janitor/JanitorRuleEngine.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.netflix.simianarmy.Resource;
import java.util.List;
/**
 * The interface for a janitor rule engine that decides if a resource should be a candidate
 * for cleanup based on a collection of rules.
 */
public interface JanitorRuleEngine {
    /**
     * Decides whether the resource should be a candidate of cleanup based on the underlying rules.
     *
     * @param resource
     *            The resource
     * @return true if the resource is valid and should not be a candidate of cleanup based on the underlying rules,
     *         false otherwise.
     */
    boolean isValid(Resource resource);
    /**
     * Adds a rule to decide if a resource should be a candidate for cleanup.
     *
     * @param rule
     *            The rule to decide if a resource should be a candidate for cleanup.
     * @return This JanitorRuleEngine object, to allow call chaining.
     */
    JanitorRuleEngine addRule(Rule rule);
    /**
     * Adds a rule to decide if a resource should be excluded from cleanup.
     * Exclusion rules are evaluated before regular rules. If a resource
     * matches an exclusion rule, it is excluded from all other cleanup rules.
     *
     * @param rule
     *            The rule to decide if a resource should be excluded from cleanup.
     * @return This JanitorRuleEngine object, to allow call chaining.
     */
    JanitorRuleEngine addExclusionRule(Rule rule);
    /**
     * Gets the rules, to find out what is planned for enforcement.
     *
     * @return the list of rules.
     */
    List<Rule> getRules();
    /**
     * Gets the exclusion rules, to find out what is excluded from enforcement.
     *
     * @return the list of exclusion rules.
     */
    List<Rule> getExclusionRules();
}
| 4,818 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/janitor/Rule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.janitor;
import com.netflix.simianarmy.Resource;
/**
 * A rule implementing the logic to decide if a resource should be considered a cleanup candidate.
 */
public interface Rule {
    /**
     * Decides whether the resource should be a candidate of cleanup based on the underlying rule.
     * When the rule considers the resource a candidate for cleanup, the implementation is expected
     * to set the expected termination time and termination reason on the resource.
     *
     * @param resource
     *            The resource
     * @return true if the resource is valid and is not for cleanup, false otherwise
     */
    boolean isValid(Resource resource);
}
| 4,819 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/RDSRecorder.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import com.amazonaws.AmazonClientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.MonkeyType;
import com.netflix.simianarmy.basic.BasicRecorderEvent;
import com.zaxxer.hikari.HikariDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
 * The Class RDSRecorder. Records events to and fetches events from an RDS table (default SIMIAN_ARMY).
 */
@SuppressWarnings("serial")
public class RDSRecorder implements MonkeyRecorder {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(RDSRecorder.class);

    /** Shared JSON mapper; ObjectMapper is thread-safe and expensive to construct per call. */
    private static final ObjectMapper MAPPER = new ObjectMapper();

    /** The region this recorder operates in; stored with and used to filter every event. */
    private final String region;

    /** The table events are written to and read from. */
    private final String table;

    /** The Spring JDBC template used for all database access. */
    JdbcTemplate jdbcTemplate = null;

    public static final String FIELD_ID = "eventId";
    public static final String FIELD_EVENT_TIME = "eventTime";
    public static final String FIELD_MONKEY_TYPE = "monkeyType";
    public static final String FIELD_EVENT_TYPE = "eventType";
    public static final String FIELD_REGION = "region";
    public static final String FIELD_DATA_JSON = "dataJson";

    /**
     * Instantiates a new RDS recorder backed by a small HikariCP connection pool.
     *
     * @param dbDriver the JDBC driver class name
     * @param dbUser the database user
     * @param dbPass the database password
     * @param dbUrl the JDBC url
     * @param dbTable the table to record events in
     * @param region the region this recorder operates in
     */
    public RDSRecorder(String dbDriver, String dbUser,
            String dbPass, String dbUrl, String dbTable, String region) {
        HikariDataSource dataSource = new HikariDataSource();
        dataSource.setDriverClassName(dbDriver);
        dataSource.setJdbcUrl(dbUrl);
        dataSource.setUsername(dbUser);
        dataSource.setPassword(dbPass);
        dataSource.setMaximumPoolSize(2);
        this.jdbcTemplate = new JdbcTemplate(dataSource);
        this.table = dbTable;
        this.region = region;
    }

    /**
     * Instantiates a new RDS recorder with an externally supplied JdbcTemplate.
     * This constructor is intended for unit testing.
     *
     * @param jdbcTemplate the JDBC template to use
     * @param table the table to record events in
     * @param region the region this recorder operates in
     */
    public RDSRecorder(JdbcTemplate jdbcTemplate, String table, String region) {
        this.jdbcTemplate = jdbcTemplate;
        this.table = table;
        this.region = region;
    }

    /**
     * Gets the JDBC template used by this recorder.
     * @return the JDBC template
     */
    public JdbcTemplate getJdbcTemplate() {
        return jdbcTemplate;
    }

    /** {@inheritDoc} */
    @Override
    public Event newEvent(MonkeyType monkeyType, EventType eventType, String reg, String id) {
        return new BasicRecorderEvent(monkeyType, eventType, reg, id);
    }

    /** {@inheritDoc} */
    @Override
    public void recordEvent(Event evt) {
        String evtTime = String.valueOf(evt.eventTime().getTime());
        String name = String.format("%s-%s-%s-%s", evt.monkeyType().name(), evt.id(), region, evtTime);
        String json;
        try {
            json = MAPPER.writeValueAsString(evt.fields());
        } catch (JsonProcessingException e) {
            // Best effort: an unserializable event is logged and dropped, not rethrown.
            LOGGER.error("ERROR generating JSON when saving resource " + name, e);
            return;
        }
        LOGGER.debug(String.format("Saving event %s to RDS table %s", name, table));
        StringBuilder sb = new StringBuilder();
        sb.append("insert into ").append(table);
        sb.append(" (");
        sb.append(FIELD_ID).append(",");
        sb.append(FIELD_EVENT_TIME).append(",");
        sb.append(FIELD_MONKEY_TYPE).append(",");
        sb.append(FIELD_EVENT_TYPE).append(",");
        sb.append(FIELD_REGION).append(",");
        sb.append(FIELD_DATA_JSON).append(") values (?,?,?,?,?,?)");
        LOGGER.debug(String.format("Insert statement is '%s'", sb));
        int updated = this.jdbcTemplate.update(sb.toString(),
                evt.id(),
                evt.eventTime().getTime(),
                SimpleDBRecorder.enumToValue(evt.monkeyType()),
                SimpleDBRecorder.enumToValue(evt.eventType()),
                evt.region(),
                json);
        LOGGER.debug(String.format("%d rows inserted", updated));
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(Map<String, String> query, Date after) {
        return findEvents(null, null, query, after);
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(MonkeyType monkeyType, Map<String, String> query, Date after) {
        return findEvents(monkeyType, null, query, after);
    }

    /** {@inheritDoc} */
    @Override
    public List<Event> findEvents(MonkeyType monkeyType, EventType eventType, Map<String, String> query, Date after) {
        ArrayList<Object> args = new ArrayList<>();
        StringBuilder sqlquery = new StringBuilder(
                String.format("select * from %s where region = ?", table));
        args.add(region);
        if (monkeyType != null) {
            sqlquery.append(String.format(" and %s = ?", FIELD_MONKEY_TYPE));
            args.add(SimpleDBRecorder.enumToValue(monkeyType));
        }
        if (eventType != null) {
            sqlquery.append(String.format(" and %s = ?", FIELD_EVENT_TYPE));
            args.add(SimpleDBRecorder.enumToValue(eventType));
        }
        for (Map.Entry<String, String> pair : query.entrySet()) {
            sqlquery.append(String.format(" and %s like ?", FIELD_DATA_JSON));
            // Jackson serializes map entries as "key":"value" (no space after the colon).
            // Wrap the fragment in SQL wildcards so LIKE matches it anywhere inside the
            // JSON blob; the previous pattern ('key: "value"', no wildcards) never matched.
            args.add(String.format("%%\"%s\":\"%s\"%%", pair.getKey(), pair.getValue()));
        }
        sqlquery.append(String.format(" and %s > ? order by %s desc", FIELD_EVENT_TIME, FIELD_EVENT_TIME));
        args.add(after.getTime());
        LOGGER.debug(String.format("Query is '%s'", sqlquery));
        return jdbcTemplate.query(sqlquery.toString(), args.toArray(), new RowMapper<Event>() {
            public Event mapRow(ResultSet rs, int rowNum) throws SQLException {
                return mapEvent(rs);
            }
        });
    }

    /**
     * Maps a result-set row to an event. Returns null when the stored JSON payload
     * cannot be parsed; the error is logged rather than propagated, matching the
     * original best-effort behavior.
     *
     * @param rs the result set positioned at the row to map
     * @return the mapped event, or null if the JSON payload could not be parsed
     * @throws SQLException if reading a column fails
     */
    private Event mapEvent(ResultSet rs) throws SQLException {
        String json = rs.getString("dataJson");
        Event event = null;
        try {
            String id = rs.getString(FIELD_ID);
            MonkeyType monkeyType = SimpleDBRecorder.valueToEnum(MonkeyType.class, rs.getString(FIELD_MONKEY_TYPE));
            EventType eventType = SimpleDBRecorder.valueToEnum(EventType.class, rs.getString(FIELD_EVENT_TYPE));
            String evtRegion = rs.getString(FIELD_REGION);
            long time = rs.getLong(FIELD_EVENT_TIME);
            event = new BasicRecorderEvent(monkeyType, eventType, evtRegion, id, time);
            TypeReference<Map<String, String>> typeRef = new TypeReference<Map<String, String>>() { };
            Map<String, String> map = MAPPER.readValue(json, typeRef);
            for (Map.Entry<String, String> entry : map.entrySet()) {
                event.addField(entry.getKey(), entry.getValue());
            }
        } catch (IOException ie) {
            LOGGER.error("Error parsing resource from json", ie);
        }
        return event;
    }

    /**
     * Creates the RDS table, if it does not already exist.
     */
    public void init() {
        try {
            if (this.region == null || this.region.equals("region-null")) {
                // This is a mock with an invalid region; avoid a slow timeout
                LOGGER.debug("Region=null; skipping RDS table creation");
                return;
            }
            LOGGER.info("Creating RDS table: {}", table);
            String sql = String.format("create table if not exists %s ("
                    + " %s varchar(255),"
                    + " %s BIGINT,"
                    + " %s varchar(255),"
                    + " %s varchar(255),"
                    + " %s varchar(255),"
                    + " %s varchar(4096) )",
                    table,
                    FIELD_ID,
                    FIELD_EVENT_TIME,
                    FIELD_MONKEY_TYPE,
                    FIELD_EVENT_TYPE,
                    FIELD_REGION,
                    FIELD_DATA_JSON);
            LOGGER.debug("Create SQL is: '{}'", sql);
            jdbcTemplate.execute(sql);
        } catch (AmazonClientException e) {
            LOGGER.warn("Error while trying to auto-create RDS table", e);
        }
    }
}
| 4,820 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/STSAssumeRoleSessionCredentialsProvider.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.Date;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSSessionCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
import com.amazonaws.services.securitytoken.model.AssumeRoleRequest;
import com.amazonaws.services.securitytoken.model.AssumeRoleResult;
import com.amazonaws.services.securitytoken.model.Credentials;
/**
 * AWSCredentialsProvider implementation that uses the AWS Security Token
 * Service to assume a Role and create temporary, short-lived sessions to use
 * for authentication.
 *
 * This class is thread-safe: session credentials are lazily created and
 * renewed while holding the instance lock, so concurrent callers never
 * observe a half-updated credentials/expiration pair.
 */
public class STSAssumeRoleSessionCredentialsProvider implements AWSCredentialsProvider {
    /** Default duration for started sessions. */
    public static final int DEFAULT_DURATION_SECONDS = 900;

    /** Time (in milliseconds) before expiry within which credentials will be renewed. */
    private static final int EXPIRY_TIME_MILLIS = 60 * 1000;

    /** The client for starting STS sessions. */
    private final AWSSecurityTokenService securityTokenService;

    /** The current session credentials. Guarded by the instance lock. */
    private AWSSessionCredentials sessionCredentials;

    /** The expiration time for the current session credentials. Guarded by the instance lock. */
    private Date sessionCredentialsExpiration;

    /** The arn of the role to be assumed. */
    private final String roleArn;

    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which makes a
     * request to the AWS Security Token Service (STS), uses the provided
     * {@link #roleArn} to assume a role and then request short lived session
     * credentials, which will then be returned by this class's
     * {@link #getCredentials()} method.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     */
    public STSAssumeRoleSessionCredentialsProvider(String roleArn) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient();
    }

    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which makes a
     * request to the AWS Security Token Service (STS), uses the provided
     * {@link #roleArn} to assume a role and then request short lived session
     * credentials, which will then be returned by this class's
     * {@link #getCredentials()} method.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     * @param clientConfiguration
     *            The AWS ClientConfiguration to use when making AWS API requests.
     */
    public STSAssumeRoleSessionCredentialsProvider(String roleArn, ClientConfiguration clientConfiguration) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(clientConfiguration);
    }

    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified long lived AWS credentials to make a request to the AWS
     * Security Token Service (STS), uses the provided {@link #roleArn} to
     * assume a role and then request short lived session credentials, which
     * will then be returned by this class's {@link #getCredentials()} method.
     * @param longLivedCredentials
     *            The main AWS credentials for a user's account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentials longLivedCredentials, String roleArn) {
        this(longLivedCredentials, roleArn, new ClientConfiguration());
    }

    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified long lived AWS credentials to make a request to the AWS
     * Security Token Service (STS), uses the provided {@link #roleArn} to
     * assume a role and then request short lived session credentials, which
     * will then be returned by this class's {@link #getCredentials()} method.
     * @param longLivedCredentials
     *            The main AWS credentials for a user's account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     * @param clientConfiguration
     *            Client configuration connection parameters.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentials longLivedCredentials, String roleArn,
            ClientConfiguration clientConfiguration) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(longLivedCredentials, clientConfiguration);
    }

    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified credentials provider (which vends long lived AWS
     * credentials) to make a request to the AWS Security Token Service (STS),
     * uses the provided {@link #roleArn} to assume a role and then request
     * short lived session credentials, which will then be returned by this
     * class's {@link #getCredentials()} method.
     * @param longLivedCredentialsProvider
     *            Credentials provider for the main AWS credentials for a user's
     *            account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentialsProvider longLivedCredentialsProvider,
            String roleArn) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(longLivedCredentialsProvider);
    }

    /**
     * Constructs a new STSAssumeRoleSessionCredentialsProvider, which will use
     * the specified credentials provider (which vends long lived AWS
     * credentials) to make a request to the AWS Security Token Service (STS),
     * uses the provided {@link #roleArn} to assume a role and then request
     * short lived session credentials, which will then be returned by this
     * class's {@link #getCredentials()} method.
     * @param longLivedCredentialsProvider
     *            Credentials provider for the main AWS credentials for a user's
     *            account.
     * @param roleArn
     *            The AWS ARN of the Role to be assumed.
     * @param clientConfiguration
     *            Client configuration connection parameters.
     */
    public STSAssumeRoleSessionCredentialsProvider(AWSCredentialsProvider longLivedCredentialsProvider, String roleArn,
            ClientConfiguration clientConfiguration) {
        this.roleArn = roleArn;
        securityTokenService = new AWSSecurityTokenServiceClient(longLivedCredentialsProvider, clientConfiguration);
    }

    @Override
    public synchronized AWSCredentials getCredentials() {
        // Synchronized: credentials providers are typically shared across request
        // threads, and sessionCredentials/sessionCredentialsExpiration are updated
        // as a pair. The lock also prevents redundant concurrent STS calls.
        if (needsNewSession()) {
            startSession();
        }
        return sessionCredentials;
    }

    @Override
    public synchronized void refresh() {
        startSession();
    }

    /**
     * Starts a new session by sending a request to the AWS Security Token
     * Service (STS) to assume a Role using the long lived AWS credentials. This
     * class then vends the short lived session credentials for the assumed Role
     * sent back from STS. Callers must hold the instance lock.
     */
    private void startSession() {
        AssumeRoleResult assumeRoleResult = securityTokenService.assumeRole(new AssumeRoleRequest()
                .withRoleArn(roleArn).withDurationSeconds(DEFAULT_DURATION_SECONDS).withRoleSessionName("SimianArmy"));
        Credentials stsCredentials = assumeRoleResult.getCredentials();
        sessionCredentials = new BasicSessionCredentials(stsCredentials.getAccessKeyId(),
                stsCredentials.getSecretAccessKey(), stsCredentials.getSessionToken());
        sessionCredentialsExpiration = stsCredentials.getExpiration();
    }

    /**
     * Returns true if a new STS session needs to be started. A new STS session
     * is needed when no session has been started yet, or if the last session is
     * within {@link #EXPIRY_TIME_MILLIS} milliseconds of expiring. Callers must
     * hold the instance lock.
     * @return True if a new STS session needs to be started.
     */
    private boolean needsNewSession() {
        if (sessionCredentials == null) {
            return true;
        }
        long timeRemaining = sessionCredentialsExpiration.getTime() - System.currentTimeMillis();
        return timeRemaining < EXPIRY_TIME_MILLIS;
    }
}
| 4,821 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/SimpleDBRecorder.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.model.Attribute;
import com.amazonaws.services.simpledb.model.CreateDomainRequest;
import com.amazonaws.services.simpledb.model.Item;
import com.amazonaws.services.simpledb.model.ListDomainsResult;
import com.amazonaws.services.simpledb.model.PutAttributesRequest;
import com.amazonaws.services.simpledb.model.ReplaceableAttribute;
import com.amazonaws.services.simpledb.model.SelectRequest;
import com.amazonaws.services.simpledb.model.SelectResult;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.MonkeyRecorder;
import com.netflix.simianarmy.MonkeyType;
import com.netflix.simianarmy.NamedType;
import com.netflix.simianarmy.basic.BasicRecorderEvent;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
* The Class SimpleDBRecorder. Records events to and fetched events from a Amazon SimpleDB table (default SIMIAN_ARMY)
*/
@SuppressWarnings("serial")
public class SimpleDBRecorder implements MonkeyRecorder {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(SimpleDBRecorder.class);
private final AmazonSimpleDB simpleDBClient;
private final String region;
/** The domain. */
private final String domain;
/**
* The Enum Keys.
*/
private enum Keys {
/** The event id. */
id,
/** The event time. */
eventTime,
/** The region. */
region,
/** The record type. */
recordType,
/** The monkey type. */
monkeyType,
/** The event type. */
eventType;
/** The Constant KEYSET. */
public static final Set<String> KEYSET = Collections.unmodifiableSet(new HashSet<String>() {
{
for (Keys k : Keys.values()) {
add(k.toString());
}
}
});
};
    /**
     * Instantiates a new simple db recorder.
     *
     * @param awsClient
     *            the AWS client; supplies the SimpleDB client and the region
     * @param domain
     *            the SimpleDB domain that stores the events
     */
    public SimpleDBRecorder(AWSClient awsClient, String domain) {
        Validate.notNull(awsClient);
        Validate.notNull(domain);
        this.simpleDBClient = awsClient.sdbClient();
        this.region = awsClient.region();
        this.domain = domain;
    }
    /**
     * Simple client accessor; abstracted (protected, overridable) to aid testing.
     *
     * @return the Amazon SimpleDB client used by this recorder
     */
    protected AmazonSimpleDB sdbClient() {
        return simpleDBClient;
    }
/**
* Enum to value. Converts an enum to "name|type" string
*
* @param e
* the e
* @return the string
*/
public static String enumToValue(NamedType e) {
return String.format("%s|%s", e.name(), e.getClass().getName());
}
/**
* Value to enum. Converts a "name|type" string back to an enum.
*
* @param value
* the value
* @return the enum
*/
public static <T extends NamedType> T valueToEnum(
Class<T> type, String value) {
// parts = [enum value, enum class type]
String[] parts = value.split("\\|", 2);
if (parts.length < 2) {
throw new RuntimeException("value " + value + " does not appear to be an internal enum format");
}
Class<?> enumClass;
try {
enumClass = Class.forName(parts[1]);
} catch (ClassNotFoundException e) {
throw new RuntimeException("class for enum value " + value + " not found");
}
if (!enumClass.isEnum()) {
throw new RuntimeException("value " + value + " does not appear to be of an enum type");
}
if (!type.isAssignableFrom(enumClass)) {
throw new RuntimeException("value " + value + " cannot be assigned to a variable of this type: "
+ type.getCanonicalName());
}
@SuppressWarnings("rawtypes")
Class<? extends Enum> enumType = enumClass.asSubclass(Enum.class);
@SuppressWarnings("unchecked")
T enumValue = (T) Enum.valueOf(enumType, parts[0]);
return enumValue;
}
    /**
     * {@inheritDoc}
     *
     * Only constructs the event; call {@link #recordEvent(Event)} to persist it.
     */
    @Override
    public Event newEvent(MonkeyType monkeyType, EventType eventType, String reg, String id) {
        return new BasicRecorderEvent(monkeyType, eventType, reg, id);
    }
/** {@inheritDoc} */
@Override
public void recordEvent(Event evt) {
    // Event time is stored as a string of epoch milliseconds; findEvents compares it
    // as a quoted string in the select query.
    String evtTime = String.valueOf(evt.eventTime().getTime());
    List<ReplaceableAttribute> attrs = new LinkedList<ReplaceableAttribute>();
    // replace=true on every attribute so re-recording the same item overwrites stale values.
    attrs.add(new ReplaceableAttribute(Keys.id.name(), evt.id(), true));
    attrs.add(new ReplaceableAttribute(Keys.eventTime.name(), evtTime, true));
    attrs.add(new ReplaceableAttribute(Keys.region.name(), evt.region(), true));
    attrs.add(new ReplaceableAttribute(Keys.recordType.name(), "MonkeyEvent", true));
    attrs.add(new ReplaceableAttribute(Keys.monkeyType.name(), enumToValue(evt.monkeyType()), true));
    attrs.add(new ReplaceableAttribute(Keys.eventType.name(), enumToValue(evt.eventType()), true));
    for (Map.Entry<String, String> pair : evt.fields().entrySet()) {
        // Skip empty values and any custom field that would collide with a reserved key.
        if (pair.getValue() == null || pair.getValue().equals("") || Keys.KEYSET.contains(pair.getKey())) {
            continue;
        }
        attrs.add(new ReplaceableAttribute(pair.getKey(), pair.getValue(), true));
    }
    // Let pk contain the timestamp so that the same resource can have multiple events.
    // NOTE(review): the item name uses this recorder's region while the stored attribute
    // uses evt.region() — confirm these are intended to be allowed to differ.
    String pk = String.format("%s-%s-%s-%s", evt.monkeyType().name(), evt.id(), region, evtTime);
    PutAttributesRequest putReq = new PutAttributesRequest(domain, pk, attrs);
    sdbClient().putAttributes(putReq);
}
/**
 * Find events.
 *
 * <p>Selects every event in this recorder's region matching all key/value pairs in
 * {@code queryMap} that occurred after {@code after}, following SimpleDB pagination
 * tokens until the result set is exhausted.
 *
 * @param queryMap
 *            the query map of attribute name to required value
 * @param after
 *            the start time to query for all events after (epoch millis)
 * @return the list of matching events, most recent first
 */
protected List<Event> findEvents(Map<String, String> queryMap, long after) {
    StringBuilder query = new StringBuilder(
            String.format("select * from `%s` where region = '%s'", domain, region));
    for (Map.Entry<String, String> pair : queryMap.entrySet()) {
        // FIX: escape embedded quotes so caller-supplied values cannot break out of
        // the quoted literal (SimpleDB select escapes a quote by doubling it).
        // NOTE(review): attribute names are still interpolated verbatim; callers
        // must only supply trusted keys.
        String escapedValue = pair.getValue().replace("'", "''");
        query.append(String.format(" and %s = '%s'", pair.getKey(), escapedValue));
    }
    // NOTE(review): eventTime is compared as a quoted string, which is only correct
    // while all stored epoch-millis values have the same digit count.
    query.append(String.format(" and eventTime > '%d'", after));
    // always return with most recent record first
    query.append(" order by eventTime desc");
    List<Event> list = new LinkedList<Event>();
    SelectRequest request = new SelectRequest(query.toString());
    request.setConsistentRead(Boolean.TRUE);
    SelectResult result = new SelectResult();
    do {
        // First iteration passes a null token; subsequent ones continue the page scan.
        result = sdbClient().select(request.withNextToken(result.getNextToken()));
        for (Item item : result.getItems()) {
            Map<String, String> fields = new HashMap<String, String>();
            Map<String, String> res = new HashMap<String, String>();
            // Split attributes into reserved keys (res) and free-form event fields.
            for (Attribute attr : item.getAttributes()) {
                if (Keys.KEYSET.contains(attr.getName())) {
                    res.put(attr.getName(), attr.getValue());
                } else {
                    fields.put(attr.getName(), attr.getValue());
                }
            }
            String eid = res.get(Keys.id.name());
            String ereg = res.get(Keys.region.name());
            MonkeyType monkeyType = valueToEnum(MonkeyType.class, res.get(Keys.monkeyType.name()));
            EventType eventType = valueToEnum(EventType.class, res.get(Keys.eventType.name()));
            long eventTime = Long.parseLong(res.get(Keys.eventTime.name()));
            list.add(new BasicRecorderEvent(monkeyType, eventType, ereg, eid, eventTime).addFields(fields));
        }
    } while (result.getNextToken() != null);
    return list;
}
/** {@inheritDoc} */
@Override
public List<Event> findEvents(Map<String, String> query, Date after) {
    // Delegate to the millisecond-based overload.
    return findEvents(query, after.getTime());
}
/** {@inheritDoc} */
@Override
public List<Event> findEvents(MonkeyType monkeyType, Map<String, String> query, Date after) {
    // Widen the caller's query with the monkey-type constraint, leaving the input untouched.
    Map<String, String> constrained = new LinkedHashMap<String, String>(query);
    constrained.put(Keys.monkeyType.name(), enumToValue(monkeyType));
    return findEvents(constrained, after);
}
/** {@inheritDoc} */
@Override
public List<Event> findEvents(MonkeyType monkeyType, EventType eventType, Map<String, String> query, Date after) {
    // Widen the caller's query with both type constraints, leaving the input untouched.
    Map<String, String> constrained = new LinkedHashMap<String, String>(query);
    constrained.put(Keys.monkeyType.name(), enumToValue(monkeyType));
    constrained.put(Keys.eventType.name(), enumToValue(eventType));
    return findEvents(constrained, after);
}
/**
 * Creates the SimpleDB domain, if it does not already exist. Any AWS client
 * failure is logged and swallowed so initialization does not abort startup.
 */
public void init() {
    try {
        if (this.region == null || this.region.equals("region-null")) {
            // This is a mock with an invalid region; avoid a slow timeout
            LOGGER.debug("Region=null; skipping SimpleDB domain creation");
            return;
        }
        if (sdbClient().listDomains().getDomainNames().contains(domain)) {
            LOGGER.debug("SimpleDB domain found: {}", domain);
            return;
        }
        LOGGER.info("Creating SimpleDB domain: {}", domain);
        sdbClient().createDomain(new CreateDomainRequest(domain));
    } catch (AmazonClientException e) {
        LOGGER.warn("Error while trying to auto-create SimpleDB domain", e);
    }
}
}
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import com.netflix.simianarmy.ResourceType;
/**
 * The enum of resource types of AWS.
 * Each constant names a kind of AWS resource that SimianArmy can track and clean up;
 * constants implement {@link ResourceType} so they can be stored by name.
 */
public enum AWSResourceType implements ResourceType {
    /** AWS instance. */
    INSTANCE,
    /** AWS EBS volume. */
    EBS_VOLUME,
    /** AWS EBS snapshot. */
    EBS_SNAPSHOT,
    /** AWS auto scaling group. */
    ASG,
    /** AWS launch configuration. */
    LAUNCH_CONFIG,
    /** AWS S3 bucket. */
    S3_BUCKET,
    /** AWS security group. */
    SECURITY_GROUP,
    /** AWS Amazon Machine Image. **/
    IMAGE,
    /** AWS Elastic Load Balancer. **/
    ELB
}
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.amazonaws.services.simpleemail.model.Body;
import com.amazonaws.services.simpleemail.model.Content;
import com.amazonaws.services.simpleemail.model.Destination;
import com.amazonaws.services.simpleemail.model.Message;
import com.amazonaws.services.simpleemail.model.SendEmailRequest;
import com.amazonaws.services.simpleemail.model.SendEmailResult;
import com.netflix.simianarmy.MonkeyEmailNotifier;
/**
 * The class implements the monkey email notifier using AWS simple email service
 * for sending email.
 */
public abstract class AWSEmailNotifier implements MonkeyEmailNotifier {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(AWSEmailNotifier.class);

    /**
     * Address validation pattern: a local part of letters/digits/{@code _-+.#}
     * optionally followed by dot-separated atoms, then a domain of dot-separated
     * labels ending in an alphabetic TLD at least two characters long.
     *
     * <p>FIX: the repeating local-part group previously began with an unescaped
     * {@code .} (which matches ANY character), so malformed addresses such as
     * {@code "a b@x.com"} were accepted; the dot is now escaped to match a literal dot.
     */
    private static final String EMAIL_PATTERN =
            "^[_A-Za-z0-9-\\+\\.#]+(\\.[_A-Za-z0-9-#]+)*@"
            + "[A-Za-z0-9-]+(\\.[A-Za-z0-9]+)*(\\.[A-Za-z]{2,})$";

    private final Pattern emailPattern;
    private final AmazonSimpleEmailServiceClient sesClient;

    /**
     * The constructor.
     *
     * @param sesClient the SES client used to send email; if null, sendEmail
     *        throws at call time
     */
    public AWSEmailNotifier(AmazonSimpleEmailServiceClient sesClient) {
        super();
        this.sesClient = sesClient;
        this.emailPattern = Pattern.compile(EMAIL_PATTERN);
    }

    @Override
    public void sendEmail(String to, String subject, String body) {
        if (!isValidEmail(to)) {
            LOGGER.error(String.format("The destination email address %s is not valid, no email is sent.", to));
            return;
        }
        if (sesClient == null) {
            String msg = "The email client is not set.";
            LOGGER.error(msg);
            throw new RuntimeException(msg);
        }
        // CC list and source address are supplied by the concrete subclass.
        Destination destination = new Destination().withToAddresses(to)
                .withCcAddresses(getCcAddresses(to));
        Content subjectContent = new Content(subject);
        Content bodyContent = new Content();
        Body msgBody = new Body(bodyContent);
        // Body is sent as HTML content.
        msgBody.setHtml(new Content(body));
        Message msg = new Message(subjectContent, msgBody);
        String sourceAddress = getSourceAddress(to);
        SendEmailRequest request = new SendEmailRequest(sourceAddress, destination, msg);
        request.setReturnPath(sourceAddress);
        LOGGER.debug(String.format("Sending email with subject '%s' to %s",
                subject, to));
        SendEmailResult result = null;
        try {
            result = sesClient.sendEmail(request);
        } catch (Exception e) {
            throw new RuntimeException(String.format("Failed to send email to %s", to), e);
        }
        LOGGER.info(String.format("Email to %s, result id is %s, subject is %s",
                to, result.getMessageId(), subject));
    }

    /**
     * {@inheritDoc}
     *
     * <p>Rejects null, anything not matching {@code EMAIL_PATTERN}, and the
     * placeholder address {@code foo@bar.com} left over from default configuration.
     */
    @Override
    public boolean isValidEmail(String email) {
        if (email == null) {
            return false;
        }
        if (!emailPattern.matcher(email).matches()) {
            LOGGER.error(String.format("Invalid email address: %s", email));
            return false;
        }
        if (email.equals("foo@bar.com")) {
            LOGGER.error(String.format("Email address not changed from default; treating as invalid: %s", email));
            return false;
        }
        return true;
    }
}
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws;
import java.util.*;
import org.apache.commons.lang.Validate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.netflix.simianarmy.NamedType;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
/**
 * The class represents general AWS resources that are managed by janitor monkey.
 */
public class AWSResource implements Resource {
    // Mutable resource attributes. All Date-valued fields are stored and returned
    // via defensive copies (see getCopyOfDate) so callers cannot mutate internal
    // state through a shared java.util.Date instance.
    private String id;
    private ResourceType resourceType;
    private String region;
    private String ownerEmail;
    private String description;
    private String terminationReason;
    private CleanupState state;
    private Date expectedTerminationTime;
    private Date actualTerminationTime;
    private Date notificationTime;
    private Date launchTime;
    private Date markTime;
    private boolean optOutOfJanitor;
    private String awsResourceState;
    /** The field name for resourceId. **/
    public static final String FIELD_RESOURCE_ID = "resourceId";
    /** The field name for resourceType. **/
    public static final String FIELD_RESOURCE_TYPE = "resourceType";
    /** The field name for region. **/
    public static final String FIELD_REGION = "region";
    /** The field name for owner email. **/
    public static final String FIELD_OWNER_EMAIL = "ownerEmail";
    /** The field name for description. **/
    public static final String FIELD_DESCRIPTION = "description";
    /** The field name for state. **/
    public static final String FIELD_STATE = "state";
    /** The field name for terminationReason. **/
    public static final String FIELD_TERMINATION_REASON = "terminationReason";
    /** The field name for expectedTerminationTime. **/
    public static final String FIELD_EXPECTED_TERMINATION_TIME = "expectedTerminationTime";
    /** The field name for actualTerminationTime. **/
    public static final String FIELD_ACTUAL_TERMINATION_TIME = "actualTerminationTime";
    /** The field name for notificationTime. **/
    public static final String FIELD_NOTIFICATION_TIME = "notificationTime";
    /** The field name for launchTime. **/
    public static final String FIELD_LAUNCH_TIME = "launchTime";
    /** The field name for markTime. **/
    public static final String FIELD_MARK_TIME = "markTime";
    /** The field name for isOptOutOfJanitor. **/
    public static final String FIELD_OPT_OUT_OF_JANITOR = "optOutOfJanitor";
    /** The field name for awsResourceState. **/
    public static final String FIELD_AWS_RESOURCE_STATE = "awsResourceState";
    /** The date format used to print or parse a Date value. Thread-safe (Joda-Time formatter). **/
    public static final DateTimeFormatter DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss");
    /** The map from name to value for additional fields used by the resource. **/
    private final Map<String, String> additionalFields = new HashMap<String, String>();
    /** The map from AWS tag key to value for the resource. **/
    private final Map<String, String> tags = new HashMap<String, String>();
    /** {@inheritDoc} */
    @Override
    public Map<String, String> getFieldToValueMap() {
        // Serializes every non-null standard field; Dates are printed via DATE_FORMATTER.
        Map<String, String> fieldToValue = new HashMap<String, String>();
        putToMapIfNotNull(fieldToValue, FIELD_RESOURCE_ID, getId());
        putToMapIfNotNull(fieldToValue, FIELD_RESOURCE_TYPE, getResourceType());
        putToMapIfNotNull(fieldToValue, FIELD_REGION, getRegion());
        putToMapIfNotNull(fieldToValue, FIELD_OWNER_EMAIL, getOwnerEmail());
        putToMapIfNotNull(fieldToValue, FIELD_DESCRIPTION, getDescription());
        putToMapIfNotNull(fieldToValue, FIELD_STATE, getState());
        putToMapIfNotNull(fieldToValue, FIELD_TERMINATION_REASON, getTerminationReason());
        putToMapIfNotNull(fieldToValue, FIELD_EXPECTED_TERMINATION_TIME, printDate(getExpectedTerminationTime()));
        putToMapIfNotNull(fieldToValue, FIELD_ACTUAL_TERMINATION_TIME, printDate(getActualTerminationTime()));
        putToMapIfNotNull(fieldToValue, FIELD_NOTIFICATION_TIME, printDate(getNotificationTime()));
        putToMapIfNotNull(fieldToValue, FIELD_LAUNCH_TIME, printDate(getLaunchTime()));
        putToMapIfNotNull(fieldToValue, FIELD_MARK_TIME, printDate(getMarkTime()));
        putToMapIfNotNull(fieldToValue, FIELD_AWS_RESOURCE_STATE, getAWSResourceState());
        // Additional fields are serialized while tags are not. So if any tags need to be
        // serialized as well, put them to additional fields.
        fieldToValue.put(FIELD_OPT_OUT_OF_JANITOR, String.valueOf(isOptOutOfJanitor()));
        fieldToValue.putAll(additionalFields);
        return fieldToValue;
    }
    /**
     * Parse a map from field name to value to a resource.
     * Inverse of {@link #getFieldToValueMap()}: unrecognized keys become additional fields.
     * NOTE: the lowercase 't' in "Fieldto" is part of the established public API name.
     * @param fieldToValue the map from field name to value
     * @return the resource that is de-serialized from the map
     */
    public static AWSResource parseFieldtoValueMap(Map<String, String> fieldToValue) {
        AWSResource resource = new AWSResource();
        for (Map.Entry<String, String> field : fieldToValue.entrySet()) {
            String name = field.getKey();
            String value = field.getValue();
            if (name.equals(FIELD_RESOURCE_ID)) {
                resource.setId(value);
            } else if (name.equals(FIELD_RESOURCE_TYPE)) {
                resource.setResourceType(AWSResourceType.valueOf(value));
            } else if (name.equals(FIELD_REGION)) {
                resource.setRegion(value);
            } else if (name.equals(FIELD_OWNER_EMAIL)) {
                resource.setOwnerEmail(value);
            } else if (name.equals(FIELD_DESCRIPTION)) {
                resource.setDescription(value);
            } else if (name.equals(FIELD_STATE)) {
                resource.setState(CleanupState.valueOf(value));
            } else if (name.equals(FIELD_TERMINATION_REASON)) {
                resource.setTerminationReason(value);
            } else if (name.equals(FIELD_EXPECTED_TERMINATION_TIME)) {
                resource.setExpectedTerminationTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_ACTUAL_TERMINATION_TIME)) {
                resource.setActualTerminationTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_NOTIFICATION_TIME)) {
                resource.setNotificationTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_LAUNCH_TIME)) {
                resource.setLaunchTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_MARK_TIME)) {
                resource.setMarkTime(new Date(DATE_FORMATTER.parseDateTime(value).getMillis()));
            } else if (name.equals(FIELD_AWS_RESOURCE_STATE)) {
                resource.setAWSResourceState(value);
            } else if (name.equals(FIELD_OPT_OUT_OF_JANITOR)) {
                resource.setOptOutOfJanitor("true".equals(value));
            } else {
                // put all other fields into additional fields
                resource.setAdditionalField(name, value);
            }
        }
        return resource;
    }
    /** @return the raw AWS-reported state string recorded for this resource. */
    public String getAWSResourceState() {
        return awsResourceState;
    }
    /** Sets the raw AWS-reported state string for this resource. */
    public void setAWSResourceState(String awsState) {
        this.awsResourceState = awsState;
    }
    /** {@inheritDoc} */
    @Override
    public String getId() {
        return id;
    }
    /** {@inheritDoc} */
    @Override
    public void setId(String id) {
        this.id = id;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withId(String resourceId) {
        setId(resourceId);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public ResourceType getResourceType() {
        return resourceType;
    }
    /** {@inheritDoc} */
    @Override
    public void setResourceType(ResourceType resourceType) {
        this.resourceType = resourceType;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withResourceType(ResourceType type) {
        setResourceType(type);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getRegion() {
        return region;
    }
    /** {@inheritDoc} */
    @Override
    public void setRegion(String region) {
        this.region = region;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withRegion(String resourceRegion) {
        setRegion(resourceRegion);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getOwnerEmail() {
        return ownerEmail;
    }
    /** {@inheritDoc} */
    @Override
    public void setOwnerEmail(String ownerEmail) {
        this.ownerEmail = ownerEmail;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withOwnerEmail(String resourceOwner) {
        setOwnerEmail(resourceOwner);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getDescription() {
        return description;
    }
    /** {@inheritDoc} */
    @Override
    public void setDescription(String description) {
        this.description = description;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withDescription(String resourceDescription) {
        setDescription(resourceDescription);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public Date getLaunchTime() {
        return getCopyOfDate(launchTime);
    }
    /** {@inheritDoc} */
    @Override
    public void setLaunchTime(Date launchTime) {
        this.launchTime = getCopyOfDate(launchTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withLaunchTime(Date resourceLaunchTime) {
        setLaunchTime(resourceLaunchTime);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public Date getMarkTime() {
        return getCopyOfDate(markTime);
    }
    /** {@inheritDoc} */
    @Override
    public void setMarkTime(Date markTime) {
        this.markTime = getCopyOfDate(markTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withMarkTime(Date resourceMarkTime) {
        setMarkTime(resourceMarkTime);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public Date getExpectedTerminationTime() {
        return getCopyOfDate(expectedTerminationTime);
    }
    /** {@inheritDoc} */
    @Override
    public void setExpectedTerminationTime(Date expectedTerminationTime) {
        this.expectedTerminationTime = getCopyOfDate(expectedTerminationTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withExpectedTerminationTime(Date resourceExpectedTerminationTime) {
        setExpectedTerminationTime(resourceExpectedTerminationTime);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public Date getActualTerminationTime() {
        return getCopyOfDate(actualTerminationTime);
    }
    /** {@inheritDoc} */
    @Override
    public void setActualTerminationTime(Date actualTerminationTime) {
        this.actualTerminationTime = getCopyOfDate(actualTerminationTime);
    }
    /** {@inheritDoc} */
    @Override
    public Resource withActualTerminationTime(Date resourceActualTerminationTime) {
        setActualTerminationTime(resourceActualTerminationTime);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public Date getNotificationTime() {
        return getCopyOfDate(notificationTime);
    }
    /** {@inheritDoc} */
    @Override
    public void setNotificationTime(Date notificationTime) {
        this.notificationTime = getCopyOfDate(notificationTime);
    }
    /** {@inheritDoc} */
    // The misspelled name ("Nnotification") is dictated by the Resource interface
    // this overrides, so it cannot be corrected here without breaking callers.
    @Override
    public Resource withNnotificationTime(Date resourceNotificationTime) {
        setNotificationTime(resourceNotificationTime);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public CleanupState getState() {
        return state;
    }
    /** {@inheritDoc} */
    @Override
    public void setState(CleanupState state) {
        this.state = state;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withState(CleanupState resourceState) {
        setState(resourceState);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public String getTerminationReason() {
        return terminationReason;
    }
    /** {@inheritDoc} */
    @Override
    public void setTerminationReason(String terminationReason) {
        this.terminationReason = terminationReason;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withTerminationReason(String resourceTerminationReason) {
        setTerminationReason(resourceTerminationReason);
        return this;
    }
    /** {@inheritDoc} */
    @Override
    public boolean isOptOutOfJanitor() {
        return optOutOfJanitor;
    }
    /** {@inheritDoc} */
    @Override
    public void setOptOutOfJanitor(boolean optOutOfJanitor) {
        this.optOutOfJanitor = optOutOfJanitor;
    }
    /** {@inheritDoc} */
    @Override
    public Resource withOptOutOfJanitor(boolean optOut) {
        setOptOutOfJanitor(optOut);
        return this;
    }
    /** Defensive copy so internal Dates are never shared with callers; null-safe. */
    private static Date getCopyOfDate(Date date) {
        if (date == null) {
            return null;
        }
        return new Date(date.getTime());
    }
    /** Puts key/value into map only when the value is non-null. */
    private static void putToMapIfNotNull(Map<String, String> map, String key, String value) {
        Validate.notNull(map);
        Validate.notNull(key);
        if (value != null) {
            map.put(key, value);
        }
    }
    /** Puts key/enum-name into map only when the value is non-null. */
    private static void putToMapIfNotNull(Map<String, String> map, String key, Enum<?> value) {
        Validate.notNull(map);
        Validate.notNull(key);
        if (value != null) {
            map.put(key, value.name());
        }
    }
    /** Puts key/named-type-name into map only when the value is non-null. */
    private static void putToMapIfNotNull(Map<String, String> map, String key, NamedType value) {
        Validate.notNull(map);
        Validate.notNull(key);
        if (value != null) {
            map.put(key, value.name());
        }
    }
    /** Formats a Date with DATE_FORMATTER; null-safe. */
    private static String printDate(Date date) {
        if (date == null) {
            return null;
        }
        return DATE_FORMATTER.print(date.getTime());
    }
    @Override
    public Resource setAdditionalField(String fieldName, String fieldValue) {
        Validate.notNull(fieldName);
        Validate.notNull(fieldValue);
        putToMapIfNotNull(additionalFields, fieldName, fieldValue);
        return this;
    }
    @Override
    public String getAdditionalField(String fieldName) {
        return additionalFields.get(fieldName);
    }
    @Override
    public Collection<String> getAdditionalFieldNames() {
        return additionalFields.keySet();
    }
    @Override
    public Resource cloneResource() {
        // Deep-enough copy: Dates are defensively copied by the with* setters, and
        // additionalFields/tags are copied map-by-map below.
        Resource clone = new AWSResource()
                .withActualTerminationTime(getActualTerminationTime())
                .withDescription(getDescription())
                .withExpectedTerminationTime(getExpectedTerminationTime())
                .withId(getId())
                .withLaunchTime(getLaunchTime())
                .withMarkTime(getMarkTime())
                .withNnotificationTime(getNotificationTime())
                .withOwnerEmail(getOwnerEmail())
                .withRegion(getRegion())
                .withResourceType(getResourceType())
                .withState(getState())
                .withTerminationReason(getTerminationReason())
                .withOptOutOfJanitor(isOptOutOfJanitor());
        ((AWSResource) clone).setAWSResourceState(awsResourceState);
        ((AWSResource) clone).additionalFields.putAll(additionalFields);
        // Tags are copied key-by-key because Resource exposes no bulk tag setter.
        for (String key : this.getAllTagKeys()) {
            clone.setTag(key, this.getTag(key));
        }
        return clone;
    }
    /** {@inheritDoc} */
    @Override
    public void setTag(String key, String value) {
        tags.put(key, value);
    }
    /** {@inheritDoc} */
    @Override
    public String getTag(String key) {
        return tags.get(key);
    }
    /** {@inheritDoc} */
    @Override
    public Collection<String> getAllTagKeys() {
        return tags.keySet();
    }
    @Override
    public String toString() {
        return "AWSResource{" +
                "id='" + id + '\'' +
                ", resourceType=" + resourceType +
                ", region='" + region + '\'' +
                ", ownerEmail='" + ownerEmail + '\'' +
                ", description='" + description + '\'' +
                ", terminationReason='" + terminationReason + '\'' +
                ", state=" + state +
                ", expectedTerminationTime=" + expectedTerminationTime +
                ", actualTerminationTime=" + actualTerminationTime +
                ", notificationTime=" + notificationTime +
                ", launchTime=" + launchTime +
                ", markTime=" + markTime +
                ", optOutOfJanitor=" + optOutOfJanitor +
                ", awsResourceState='" + awsResourceState + '\'' +
                ", additionalFields=" + additionalFields +
                ", tags=" + tags +
                '}';
    }
    @Override
    public boolean equals(Object o) {
        // consider two resources to be equivalent if id, resourceType and region match
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AWSResource that = (AWSResource) o;
        return Objects.equals(id, that.id) &&
                Objects.equals(resourceType, that.resourceType) &&
                Objects.equals(region, that.region);
    }
    @Override
    public int hashCode() {
        // consider two resources to be equivalent if id, resourceType and region match
        return Objects.hash(id, resourceType, region);
    }
}
/*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity;
import com.amazonaws.AmazonClientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityClusterTracker;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* The RDSConformityClusterTracker implementation in RDS (relational database).
*/
public class RDSConformityClusterTracker implements ConformityClusterTracker {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(RDSConformityClusterTracker.class);
/** The table. */
private final String table;
/** the jdbcTemplate */
JdbcTemplate jdbcTemplate = null;
/**
 * Creates a tracker backed by a small pooled JDBC connection to the given RDS database.
 *
 * @param dbDriver JDBC driver class name
 * @param dbUser database user name
 * @param dbPass database password
 * @param dbUrl JDBC connection URL
 * @param dbTable table that stores cluster conformity records
 */
public RDSConformityClusterTracker(String dbDriver, String dbUser,
        String dbPass, String dbUrl, String dbTable) {
    HikariDataSource ds = new HikariDataSource();
    ds.setDriverClassName(dbDriver);
    ds.setJdbcUrl(dbUrl);
    ds.setUsername(dbUser);
    ds.setPassword(dbPass);
    // Keep the pool tiny: this tracker issues only occasional queries.
    ds.setMaximumPoolSize(2);
    this.table = dbTable;
    this.jdbcTemplate = new JdbcTemplate(ds);
}
/**
 * Creates a tracker around a pre-built {@link JdbcTemplate}. This constructor is
 * intended for unit testing.
 *
 * @param jdbcTemplate the template used to execute SQL
 * @param table the table that stores cluster conformity records
 */
public RDSConformityClusterTracker(JdbcTemplate jdbcTemplate, String table) {
    this.table = table;
    this.jdbcTemplate = jdbcTemplate;
}
/** @return the JDBC template used to run queries against the conformity table. */
public JdbcTemplate getJdbcTemplate() {
    return jdbcTemplate;
}
/**
 * Maps a possibly-null string to a SQL bind value.
 * NOTE(review): a null maps to the int constant java.sql.Types.NULL (whose value
 * is 0), so nulls end up stored as the string "0"; emailValue and mapResource
 * compensate by treating "0" as null when reading back.
 */
public Object value(String value) {
    return value == null ? Types.NULL : value;
}
/**
 * Maps a possibly-null date to a SQL bind value as epoch milliseconds.
 * A null date falls back to Types.NULL (int 0), mirroring value(String).
 */
public Object value(Date value) {
    return value == null ? Types.NULL : value.getTime();
}
/** Maps a boolean to its string form ("true"/"false") for storage. */
public Object value(boolean value) {
    return Boolean.toString(value);
}
/**
 * Maps an owner email to a SQL bind value.
 * Blank emails and the literal "0" (the round-tripped form of a null written via
 * value(String), since Types.NULL is 0) are stored as SQL NULL.
 */
public Object emailValue(String email) {
    if (StringUtils.isBlank(email)) return Types.NULL;
    if (email.equals("0")) return Types.NULL;
    return email;
}
/** {@inheritDoc} */
// Upsert: probes for an existing (cluster, region) row, then issues either an
// INSERT or an UPDATE with parameterized values. If the conformities map cannot
// be serialized to JSON, the save is aborted (best-effort persistence).
@Override
public void addOrUpdate(Cluster cluster) {
    Cluster orig = getCluster(cluster.getName(), cluster.getRegion());
    LOGGER.debug(String.format("Saving cluster %s to RDB table %s in region %s", cluster.getName(), cluster.getRegion(), table));
    Map<String, String> map = cluster.getFieldToValueMap();
    String conformityJson;
    try {
        // Per-rule failed components are stored as a single JSON column.
        conformityJson = new ObjectMapper().writeValueAsString(conformitiesAsMap(cluster));
    } catch (JsonProcessingException e) {
        LOGGER.error("ERROR generating conformities JSON when saving cluster " + cluster.getName() + ", " + cluster.getRegion(), e);
        return;
    }
    if (orig == null) {
        // No existing row: insert a new record.
        StringBuilder sb = new StringBuilder();
        sb.append("insert into ").append(table);
        sb.append(" (");
        sb.append(Cluster.CLUSTER).append(",");
        sb.append(Cluster.REGION).append(",");
        sb.append(Cluster.OWNER_EMAIL).append(",");
        sb.append(Cluster.IS_CONFORMING).append(",");
        sb.append(Cluster.IS_OPTEDOUT).append(",");
        sb.append(Cluster.UPDATE_TIMESTAMP).append(",");
        sb.append(Cluster.EXCLUDED_RULES).append(",");
        sb.append("conformities").append(",");
        sb.append(Cluster.CONFORMITY_RULES);
        sb.append(") values (?,?,?,?,?,?,?,?,?)");
        LOGGER.debug(String.format("Insert statement is '%s'", sb));
        this.jdbcTemplate.update(sb.toString(),
                value(map.get(Cluster.CLUSTER)),
                value(map.get(Cluster.REGION)),
                emailValue(map.get(Cluster.OWNER_EMAIL)),
                value(map.get(Cluster.IS_CONFORMING)),
                value(map.get(Cluster.IS_OPTEDOUT)),
                value(cluster.getUpdateTime()),
                value(map.get(Cluster.EXCLUDED_RULES)),
                value(conformityJson),
                value(map.get(Cluster.CONFORMITY_RULES)));
    } else {
        // Existing row: update all mutable columns keyed by (cluster, region).
        StringBuilder sb = new StringBuilder();
        sb.append("update ").append(table).append(" set ");
        sb.append(Cluster.OWNER_EMAIL).append("=?,");
        sb.append(Cluster.IS_CONFORMING).append("=?,");
        sb.append(Cluster.IS_OPTEDOUT).append("=?,");
        sb.append(Cluster.UPDATE_TIMESTAMP).append("=?,");
        sb.append(Cluster.EXCLUDED_RULES).append("=?,");
        sb.append("conformities").append("=?,");
        sb.append(Cluster.CONFORMITY_RULES).append("=? where ");
        sb.append(Cluster.CLUSTER).append("=? and ");
        sb.append(Cluster.REGION).append("=?");
        LOGGER.debug(String.format("Update statement is '%s'", sb));
        this.jdbcTemplate.update(sb.toString(),
                emailValue(map.get(Cluster.OWNER_EMAIL)),
                value(map.get(Cluster.IS_CONFORMING)),
                value(map.get(Cluster.IS_OPTEDOUT)),
                value(cluster.getUpdateTime()),
                value(map.get(Cluster.EXCLUDED_RULES)),
                value(conformityJson),
                value(map.get(Cluster.CONFORMITY_RULES)),
                value(cluster.getName()),
                value(cluster.getRegion()));
    }
    LOGGER.debug("Successfully saved.");
}
/**
 * Flattens a cluster's conformity results into a map of rule id to a
 * comma-joined list of failed components, ready for JSON serialization.
 */
private HashMap<String, String> conformitiesAsMap(Cluster cluster) {
    HashMap<String, String> byRule = new HashMap<>();
    for (Conformity c : cluster.getConformties()) {
        byRule.put(c.getRuleId(), StringUtils.join(c.getFailedComponents(), ","));
    }
    return byRule;
}
/**
 * Gets the clusters for a list of regions. If the regions parameter is empty, returns the clusters
 * for all regions.
 */
// Delegates to getClusters (defined elsewhere in this class); presumably a null
// conforming-filter selects clusters regardless of conforming state — confirm there.
@Override
public List<Cluster> getAllClusters(String... regions) {
    return getClusters(null, regions);
}
// Same delegation as getAllClusters, restricted to rows whose conforming flag is false.
@Override
public List<Cluster> getNonconformingClusters(String... regions) {
    return getClusters(false, regions);
}
/**
 * Loads a single cluster row by name and region using a parameterized query.
 *
 * @return the matching cluster, or null when no row exists
 */
@Override
public Cluster getCluster(String clusterName, String region) {
    Validate.notEmpty(clusterName);
    Validate.notEmpty(region);
    String query = String.format("select * from %s where cluster = ? and region = ?", table);
    LOGGER.info(String.format("Query is '%s'", query));
    List<Cluster> clusters = jdbcTemplate.query(query, new String[] {clusterName, region},
            new RowMapper<Cluster>() {
                public Cluster mapRow(ResultSet rs, int rowNum) throws SQLException {
                    return mapResource(rs);
                }
            });
    // (cluster, region) is expected to be unique.
    Validate.isTrue(clusters.size() <= 1);
    if (clusters.isEmpty()) {
        LOGGER.info(String.format("Not found cluster with name %s in region %s", clusterName, region));
        return null;
    }
    return clusters.get(0);
}
    /**
     * Maps one row of the RDS result set into a {@link Cluster}. The JSON
     * "conformities" column is expanded into the field map first, then the
     * scalar columns are layered on top.
     *
     * @param rs the result set positioned at the row to map
     * @return the parsed cluster
     * @throws SQLException if the conformities column cannot be parsed
     */
    private Cluster mapResource(ResultSet rs) throws SQLException {
        Map<String, String> map = conformityMapFromJson(rs.getString("conformities"));
        map.put(Cluster.CLUSTER, rs.getString(Cluster.CLUSTER));
        map.put(Cluster.REGION, rs.getString(Cluster.REGION));
        map.put(Cluster.IS_CONFORMING, rs.getString(Cluster.IS_CONFORMING));
        map.put(Cluster.IS_OPTEDOUT, rs.getString(Cluster.IS_OPTEDOUT));
        String email = rs.getString(Cluster.OWNER_EMAIL);
        // A blank or literal "0" value means no owner email was recorded.
        if (StringUtils.isBlank(email) || email.equals("0")) {
            email = null;
        }
        map.put(Cluster.OWNER_EMAIL, email);
        // The column stores epoch millis; convert to the formatted date string,
        // skipping the field entirely when the value cannot be parsed.
        String updatedTimestamp = millisToFormattedDate(rs.getString(Cluster.UPDATE_TIMESTAMP));
        if (updatedTimestamp != null) {
            map.put(Cluster.UPDATE_TIMESTAMP, updatedTimestamp);
        }
        map.put(Cluster.EXCLUDED_RULES, rs.getString(Cluster.EXCLUDED_RULES));
        map.put(Cluster.CONFORMITY_RULES, rs.getString(Cluster.CONFORMITY_RULES));
        return Cluster.parseFieldToValueMap(map);
    }
private String millisToFormattedDate(String millisStr) {
String datetime = null;
try {
long millis = Long.parseLong(millisStr);
datetime = AWSResource.DATE_FORMATTER.print(millis);
} catch(NumberFormatException nfe) {
LOGGER.error(String.format("Error parsing datetime %s when reading from RDS", millisStr));
}
return datetime;
}
private HashMap<String,String> conformityMapFromJson(String json) throws SQLException {
HashMap<String,String> map = new HashMap<>();
if (json != null) {
TypeReference<HashMap<String,String>> typeRef = new TypeReference<HashMap<String,String>>() {};
try {
ObjectMapper mapper = new ObjectMapper();
map = mapper.readValue(json, typeRef);
}catch(IOException ie) {
String msg = "Error parsing conformities from result set";
LOGGER.error(msg, ie);
throw new SQLException(msg);
}
}
return map;
}
@Override
public void deleteClusters(Cluster... clusters) {
Validate.notNull(clusters);
LOGGER.info(String.format("Deleting %d clusters", clusters.length));
for (Cluster cluster : clusters) {
LOGGER.info(String.format("Deleting cluster %s", cluster.getName()));
String stmt = String.format("delete from %s where %s=? and %s=?", table, Cluster.CLUSTER, Cluster.REGION);
jdbcTemplate.update(stmt, cluster.getName(), cluster.getRegion());
LOGGER.info(String.format("Successfully deleted cluster %s", cluster.getName()));
}
}
private List<Cluster> getClusters(Boolean conforming, String... regions) {
Validate.notNull(regions);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from %s where cluster is not null and ", table));
boolean needsAnd = false;
if (regions.length != 0) {
query.append(String.format("region in ('%s') ", StringUtils.join(regions, "','")));
needsAnd = true;
}
if (conforming != null) {
if (needsAnd) {
query.append(" and ");
}
query.append(String.format("isConforming = '%s'", conforming));
}
LOGGER.info(String.format("Query to retrieve clusters for regions %s is '%s'",
StringUtils.join(regions, "','"), query.toString()));
List<Cluster> clusters = jdbcTemplate.query(query.toString(), new RowMapper<Cluster>() {
public Cluster mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
LOGGER.info(String.format("Retrieved %d clusters from RDS DB in table %s and regions %s",
clusters.size(), table, StringUtils.join(regions, "','")));
return clusters;
}
    /**
     * Creates the RDS table, if it does not already exist.
     */
    public void init() {
        try {
            LOGGER.info("Creating RDS table: {}", table);
            // "conformities" holds the rule-id -> failed-components map
            // serialized as JSON; the other columns mirror the Cluster fields.
            String sql = String.format("create table if not exists %s ("
                    + " %s varchar(255),"
                    + " %s varchar(25),"
                    + " %s varchar(255),"
                    + " %s varchar(10),"
                    + " %s varchar(10),"
                    + " %s BIGINT,"
                    + " %s varchar(4096),"
                    + " %s varchar(4096),"
                    + " %s varchar(4096) )",
                    table,
                    Cluster.CLUSTER,
                    Cluster.REGION,
                    Cluster.OWNER_EMAIL,
                    Cluster.IS_CONFORMING,
                    Cluster.IS_OPTEDOUT,
                    Cluster.UPDATE_TIMESTAMP,
                    Cluster.EXCLUDED_RULES,
                    "conformities",
                    Cluster.CONFORMITY_RULES);
            LOGGER.debug("Create SQL is: '{}'", sql);
            jdbcTemplate.execute(sql);
        } catch (AmazonClientException e) {
            // NOTE(review): AmazonClientException seems unlikely on a JDBC code
            // path; presumably retained from an earlier SimpleDB-backed
            // implementation — confirm whether DataAccessException was intended.
            LOGGER.warn("Error while trying to auto-create RDS table", e);
        }
    }
} | 4,826 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/SimpleDBConformityClusterTracker.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.model.Attribute;
import com.amazonaws.services.simpledb.model.DeleteAttributesRequest;
import com.amazonaws.services.simpledb.model.Item;
import com.amazonaws.services.simpledb.model.PutAttributesRequest;
import com.amazonaws.services.simpledb.model.ReplaceableAttribute;
import com.amazonaws.services.simpledb.model.SelectRequest;
import com.amazonaws.services.simpledb.model.SelectResult;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.ConformityClusterTracker;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * The ConformityResourceTracker implementation in SimpleDB.
 */
public class SimpleDBConformityClusterTracker implements ConformityClusterTracker {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleDBConformityClusterTracker.class);

    /** The SimpleDB domain holding one item per (cluster, region) pair. */
    private final String domain;

    /** The SimpleDB client. */
    private final AmazonSimpleDB simpleDBClient;

    /** SimpleDB caps attribute values at 1024 bytes; longer values are truncated on save. */
    private static final int MAX_ATTR_SIZE = 1024;

    /**
     * Instantiates a new simple db cluster tracker for conformity monkey.
     *
     * @param awsClient
     *            the AWS Client
     * @param domain
     *            the domain
     */
    public SimpleDBConformityClusterTracker(AWSClient awsClient, String domain) {
        Validate.notNull(awsClient);
        Validate.notNull(domain);
        this.domain = domain;
        this.simpleDBClient = awsClient.sdbClient();
    }

    /**
     * Gets the SimpleDB client.
     * @return the SimpleDB client
     */
    protected AmazonSimpleDB getSimpleDBClient() {
        return simpleDBClient;
    }

    /** {@inheritDoc} */
    @Override
    public void addOrUpdate(Cluster cluster) {
        List<ReplaceableAttribute> attrs = new ArrayList<ReplaceableAttribute>();
        Map<String, String> fieldToValueMap = cluster.getFieldToValueMap();
        for (Map.Entry<String, String> entry : fieldToValueMap.entrySet()) {
            // Truncate to SimpleDB's attribute size limit; replace any existing value.
            attrs.add(new ReplaceableAttribute(entry.getKey(), StringUtils.left(entry.getValue(), MAX_ATTR_SIZE),
                    true));
        }
        PutAttributesRequest putRequest = new PutAttributesRequest(domain, getSimpleDBItemName(cluster), attrs);
        LOGGER.debug(String.format("Saving cluster %s to SimpleDB domain %s",
                cluster.getName(), domain));
        this.simpleDBClient.putAttributes(putRequest);
        LOGGER.debug("Successfully saved.");
    }

    /**
     * Gets the clusters for a list of regions. If the regions parameter is empty, returns the clusters
     * for all regions.
     */
    @Override
    public List<Cluster> getAllClusters(String... regions) {
        return getClusters(null, regions);
    }

    /** Gets the clusters that failed at least one conformity check in the given regions. */
    @Override
    public List<Cluster> getNonconformingClusters(String... regions) {
        return getClusters(false, regions);
    }

    /**
     * Gets a single cluster by name and region, or null when no matching item
     * exists or the item cannot be parsed. At most one item is expected per
     * (cluster, region) pair.
     */
    @Override
    public Cluster getCluster(String clusterName, String region) {
        Validate.notEmpty(clusterName);
        Validate.notEmpty(region);
        StringBuilder query = new StringBuilder();
        // NOTE(review): clusterName and region are inlined into the select
        // expression; assumed to come from trusted callers, not user input.
        query.append(String.format("select * from `%s` where cluster = '%s' and region = '%s'",
                domain, clusterName, region));
        LOGGER.info(String.format("Query is to get the cluster is '%s'", query));
        List<Item> items = querySimpleDBItems(query.toString());
        Validate.isTrue(items.size() <= 1);
        if (items.size() == 0) {
            LOGGER.info(String.format("Not found cluster with name %s in region %s", clusterName, region));
            return null;
        } else {
            Cluster cluster = null;
            try {
                cluster = parseCluster(items.get(0));
            } catch (Exception e) {
                // Ignore the item that cannot be parsed.
                LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a cluster.", items.get(0)));
            }
            return cluster;
        }
    }

    /** Deletes the SimpleDB items for the given clusters. */
    @Override
    public void deleteClusters(Cluster... clusters) {
        Validate.notNull(clusters);
        LOGGER.info(String.format("Deleting %d clusters", clusters.length));
        for (Cluster cluster : clusters) {
            LOGGER.info(String.format("Deleting cluster %s", cluster.getName()));
            simpleDBClient.deleteAttributes(new DeleteAttributesRequest(domain, getSimpleDBItemName(cluster)));
            LOGGER.info(String.format("Successfully deleted cluster %s", cluster.getName()));
        }
    }

    /**
     * Retrieves clusters, optionally filtered by conformity status and regions.
     *
     * @param conforming when non-null, only clusters whose isConforming attribute
     *        matches; when null, no conformity filter is applied
     * @param regions the regions to include; empty means all regions
     * @return the matching clusters
     */
    private List<Cluster> getClusters(Boolean conforming, String... regions) {
        Validate.notNull(regions);
        List<Cluster> clusters = Lists.newArrayList();
        StringBuilder query = new StringBuilder();
        // BUG FIX: the base clause previously always ended with "and ", which
        // made the select expression invalid whenever both filters were absent
        // (e.g. getAllClusters() with no regions). Conditions are now appended
        // only when present.
        query.append(String.format("select * from `%s` where cluster is not null", domain));
        if (regions.length != 0) {
            query.append(String.format(" and region in ('%s')", StringUtils.join(regions, "','")));
        }
        if (conforming != null) {
            query.append(String.format(" and isConforming = '%s'", conforming));
        }
        LOGGER.info(String.format("Query to retrieve clusters for regions %s is '%s'",
                StringUtils.join(regions, "','"), query.toString()));
        List<Item> items = querySimpleDBItems(query.toString());
        for (Item item : items) {
            try {
                clusters.add(parseCluster(item));
            } catch (Exception e) {
                // Ignore the item that cannot be parsed.
                LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a cluster.", item), e);
            }
        }
        LOGGER.info(String.format("Retrieved %d clusters from SimpleDB in domain %s and regions %s",
                clusters.size(), domain, StringUtils.join(regions, "','")));
        return clusters;
    }

    /**
     * Parses a SimpleDB item into a cluster.
     * @param item the item from SimpleDB
     * @return the cluster for the SimpleDB item
     */
    protected Cluster parseCluster(Item item) {
        Map<String, String> fieldToValue = new HashMap<String, String>();
        for (Attribute attr : item.getAttributes()) {
            String name = attr.getName();
            String value = attr.getValue();
            if (name != null && value != null) {
                fieldToValue.put(name, value);
            }
        }
        return Cluster.parseFieldToValueMap(fieldToValue);
    }

    /**
     * Gets the unique SimpleDB item name for a cluster. The subclass can override this
     * method to generate the item name differently.
     * @param cluster the cluster
     * @return the SimpleDB item name for the cluster
     */
    protected String getSimpleDBItemName(Cluster cluster) {
        return String.format("%s-%s", cluster.getName(), cluster.getRegion());
    }

    /** Runs a SimpleDB select, following next tokens until all pages are consumed. */
    private List<Item> querySimpleDBItems(String query) {
        Validate.notNull(query);
        String nextToken = null;
        List<Item> items = new ArrayList<Item>();
        do {
            SelectRequest request = new SelectRequest(query);
            request.setNextToken(nextToken);
            request.setConsistentRead(Boolean.TRUE);
            SelectResult result = this.simpleDBClient.select(request);
            items.addAll(result.getItems());
            nextToken = result.getNextToken();
        } while (nextToken != null);
        return items;
    }
}
| 4,827 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/crawler/AWSClusterCrawler.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.crawler;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.autoscaling.model.SuspendedProcess;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.ClusterCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * The class implementing a crawler that gets the auto scaling groups from AWS.
 */
public class AWSClusterCrawler implements ClusterCrawler {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(AWSClusterCrawler.class);

    /** Property namespace shared by all per-cluster conformity configuration keys. */
    private static final String NS = "simianarmy.conformity.cluster";

    /** The map from region to the aws client in the region. */
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();

    /** The monkey configuration used to look up per-cluster settings. */
    private final MonkeyConfiguration cfg;

    /**
     * Instantiates a new cluster crawler.
     *
     * @param regionToAwsClient
     *            the map from region to the corresponding aws client for the region
     * @param cfg
     *            the monkey configuration
     */
    public AWSClusterCrawler(Map<String, AWSClient> regionToAwsClient, MonkeyConfiguration cfg) {
        Validate.notNull(regionToAwsClient);
        Validate.notNull(cfg);
        for (Map.Entry<String, AWSClient> entry : regionToAwsClient.entrySet()) {
            this.regionToAwsClient.put(entry.getKey(), entry.getValue());
        }
        this.cfg = cfg;
    }

    /**
     * In this implementation, every auto scaling group is considered a cluster.
     * @param clusterNames
     *          the cluster names
     * @return the list of clusters matching the names, when names are empty, return all clusters
     */
    @Override
    public List<Cluster> clusters(String... clusterNames) {
        List<Cluster> list = Lists.newArrayList();
        for (Map.Entry<String, AWSClient> entry : regionToAwsClient.entrySet()) {
            String region = entry.getKey();
            AWSClient awsClient = entry.getValue();
            Set<String> asgInstances = Sets.newHashSet();
            LOGGER.info(String.format("Crawling clusters in region %s", region));
            for (AutoScalingGroup asg : awsClient.describeAutoScalingGroups(clusterNames)) {
                List<String> instances = Lists.newArrayList();
                for (Instance instance : asg.getInstances()) {
                    instances.add(instance.getInstanceId());
                    asgInstances.add(instance.getInstanceId());
                }
                com.netflix.simianarmy.conformity.AutoScalingGroup conformityAsg =
                        new com.netflix.simianarmy.conformity.AutoScalingGroup(
                                asg.getAutoScalingGroupName(),
                                instances.toArray(new String[instances.size()]));
                // An ASG with AddToLoadBalancer suspended is marked so rules can skip it.
                for (SuspendedProcess sp : asg.getSuspendedProcesses()) {
                    if ("AddToLoadBalancer".equals(sp.getProcessName())) {
                        LOGGER.info(String.format("ASG %s is suspended: %s", asg.getAutoScalingGroupName(),
                                asg.getSuspendedProcesses()));
                        conformityAsg.setSuspended(true);
                    }
                }
                Cluster cluster = new Cluster(asg.getAutoScalingGroupName(), region, conformityAsg);
                // Pick up the owner email from the ASG's global owner tag, when present.
                List<TagDescription> tagDescriptions = asg.getTags();
                for (TagDescription tagDescription : tagDescriptions) {
                    if (BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY.equalsIgnoreCase(tagDescription.getKey())) {
                        String value = tagDescription.getValue();
                        if (value != null) {
                            cluster.setOwnerEmail(value);
                        }
                    }
                }
                updateCluster(cluster);
                list.add(cluster);
            }
            //Cluster containing all solo instances
            Set<String> instances = Sets.newHashSet();
            for (com.amazonaws.services.ec2.model.Instance awsInstance : awsClient.describeInstances()) {
                if (!asgInstances.contains(awsInstance.getInstanceId())) {
                    LOGGER.info(String.format("Adding instance %s to soloInstances cluster.",
                            awsInstance.getInstanceId()));
                    instances.add(awsInstance.getInstanceId());
                }
            }
            //Only create cluster if we have solo instances.
            if (!instances.isEmpty()) {
                Cluster cluster = new Cluster("SoloInstances", region, instances);
                updateCluster(cluster);
                list.add(cluster);
            }
        }
        return list;
    }

    /** Applies per-cluster configuration: excluded rules, owner email and opt-out flag. */
    private void updateCluster(Cluster cluster) {
        updateExcludedConformityRules(cluster);
        cluster.setOwnerEmail(getOwnerEmailForCluster(cluster));
        // Consistency fix: build the property name from the shared NS constant
        // like the other per-cluster properties in this class (resulting
        // property string is unchanged).
        String prop = String.format("%s.%s.optedOut", NS, cluster.getName());
        if (cfg.getBoolOrElse(prop, false)) {
            LOGGER.info(String.format("Cluster %s is opted out of Conformity Monkey.", cluster.getName()));
            cluster.setOptOutOfConformity(true);
        } else {
            cluster.setOptOutOfConformity(false);
        }
    }

    /**
     * Gets the owner email from the monkey configuration.
     * @param cluster
     *          the cluster
     * @return the owner email if it is defined in the configuration, null otherwise.
     */
    @Override
    public String getOwnerEmailForCluster(Cluster cluster) {
        String prop = String.format("%s.%s.ownerEmail", NS, cluster.getName());
        String ownerEmail = cfg.getStr(prop);
        if (ownerEmail == null) {
            // Fall back to the email crawled from the ASG's owner tag, if any.
            ownerEmail = cluster.getOwnerEmail();
            if (ownerEmail == null) {
                LOGGER.info(String.format("No owner email is found for cluster %s in configuration "
                        + "%s or tag %s.", cluster.getName(), prop, BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY));
            } else {
                LOGGER.info(String.format("Found owner email %s for cluster %s in tag %s.",
                        ownerEmail, cluster.getName(), BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY));
                return ownerEmail;
            }
        } else {
            LOGGER.info(String.format("Found owner email %s for cluster %s in configuration %s.",
                    ownerEmail, cluster.getName(), prop));
        }
        return ownerEmail;
    }

    /** Reads the comma-separated excluded-rules property for the cluster and applies it. */
    @Override
    public void updateExcludedConformityRules(Cluster cluster) {
        String prop = String.format("%s.%s.excludedRules", NS, cluster.getName());
        String excludedRules = cfg.getStr(prop);
        if (StringUtils.isNotBlank(excludedRules)) {
            LOGGER.info(String.format("Excluded rules for cluster %s are : %s", cluster.getName(), excludedRules));
            cluster.excludeRules(StringUtils.split(excludedRules, ","));
        }
    }
}
| 4,828 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/BasicConformityEurekaClient.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.DiscoveryClient;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Set;
/**
 * A client that queries Eureka (the discovery service) for the instance
 * information needed by Conformity Monkey rules.
 */
public class BasicConformityEurekaClient implements ConformityEurekaClient {
    private static final Logger LOGGER = LoggerFactory.getLogger(BasicConformityEurekaClient.class);

    private final DiscoveryClient discoveryClient;

    /**
     * Constructor.
     * @param discoveryClient the client to access Discovery/Eureka service.
     */
    public BasicConformityEurekaClient(DiscoveryClient discoveryClient) {
        this.discoveryClient = discoveryClient;
    }

    /** Returns true when any Eureka registration of the instance declares health check urls. */
    @Override
    public boolean hasHealthCheckUrl(String region, String instanceId) {
        for (InstanceInfo info : discoveryClient.getInstancesById(instanceId)) {
            Set<String> urls = info.getHealthCheckUrls();
            boolean hasUrls = urls != null && !urls.isEmpty();
            if (hasUrls) {
                return true;
            }
        }
        return false;
    }

    /** Returns true when any Eureka registration of the instance declares a status page url. */
    @Override
    public boolean hasStatusUrl(String region, String instanceId) {
        for (InstanceInfo info : discoveryClient.getInstancesById(instanceId)) {
            if (!StringUtils.isEmpty(info.getStatusPageUrl())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns true only when the instance is registered in Eureka and every
     * registration reports a status of UP or STARTING.
     */
    @Override
    public boolean isHealthy(String region, String instanceId) {
        List<InstanceInfo> registrations = discoveryClient.getInstancesById(instanceId);
        if (registrations.isEmpty()) {
            LOGGER.info(String.format("Instance %s is not registered in Eureka in region %s.", instanceId, region));
            return false;
        }
        for (InstanceInfo info : registrations) {
            InstanceInfo.InstanceStatus status = info.getStatus();
            boolean upOrStarting = status.equals(InstanceInfo.InstanceStatus.UP)
                    || status.equals(InstanceInfo.InstanceStatus.STARTING);
            if (!upOrStarting) {
                LOGGER.info(String.format("Instance %s is not healthy in Eureka with status %s.",
                        instanceId, status.name()));
                return false;
            }
        }
        return true;
    }
}
| 4,829 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/SameZonesInElbAndAsg.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * The class implementing a conformity rule that checks if the zones in ELB and ASG are the same.
 */
public class SameZonesInElbAndAsg implements ConformityRule {
    // BUG FIX: the logger was previously bound to InstanceHasStatusUrl.class
    // (copy-paste), misattributing this rule's log lines.
    private static final Logger LOGGER = LoggerFactory.getLogger(SameZonesInElbAndAsg.class);

    /** Cache of AWS clients, one per region. */
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();

    private AWSCredentialsProvider awsCredentialsProvider;

    private static final String RULE_NAME = "SameZonesInElbAndAsg";
    private static final String REASON = "Availability zones of ELB and ASG are different";

    /**
     * Constructs an instance with the default AWS credentials provider chain.
     * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain
     */
    public SameZonesInElbAndAsg() {
        this(new DefaultAWSCredentialsProviderChain());
    }

    /**
     * Constructs an instance with the passed AWS Credential Provider.
     * @param awsCredentialsProvider the AWS credentials provider
     */
    public SameZonesInElbAndAsg(AWSCredentialsProvider awsCredentialsProvider) {
        this.awsCredentialsProvider = awsCredentialsProvider;
    }

    /**
     * Checks every ASG in the cluster against each of its attached load
     * balancers; any ELB whose availability zones differ from the ASG's is
     * reported as a failed component.
     */
    @Override
    public Conformity check(Cluster cluster) {
        // (Removed an unused local list of ASG names that was built but never read.)
        Collection<String> failedComponents = Lists.newArrayList();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            List<String> asgZones = getAvailabilityZonesForAsg(cluster.getRegion(), asg.getName());
            for (String lbName : getLoadBalancerNamesForAsg(cluster.getRegion(), asg.getName())) {
                List<String> lbZones = getAvailabilityZonesForLoadBalancer(cluster.getRegion(), lbName);
                if (!haveSameZones(asgZones, lbZones)) {
                    LOGGER.info(String.format("ASG %s and ELB %s do not have the same availability zones",
                            asgZones, lbZones));
                    failedComponents.add(lbName);
                }
            }
        }
        return new Conformity(getName(), failedComponents);
    }

    @Override
    public String getName() {
        return RULE_NAME;
    }

    @Override
    public String getNonconformingReason() {
        return REASON;
    }

    /**
     * Gets the load balancer names of an ASG. Can be overridden in subclasses.
     * @param region the region
     * @param asgName the ASG name
     * @return the list of load balancer names
     */
    protected List<String> getLoadBalancerNamesForAsg(String region, String asgName) {
        List<com.amazonaws.services.autoscaling.model.AutoScalingGroup> asgs =
                getAwsClient(region).describeAutoScalingGroups(asgName);
        if (asgs.isEmpty()) {
            LOGGER.error(String.format("Not found ASG with name %s", asgName));
            return Collections.emptyList();
        } else {
            return asgs.get(0).getLoadBalancerNames();
        }
    }

    /**
     * Gets the list of availability zones for an ASG. Can be overridden in subclasses.
     * @param region the region
     * @param asgName the ASG name.
     * @return the list of the availability zones that the ASG has, or null when not found.
     */
    protected List<String> getAvailabilityZonesForAsg(String region, String asgName) {
        List<com.amazonaws.services.autoscaling.model.AutoScalingGroup> asgs =
                getAwsClient(region).describeAutoScalingGroups(asgName);
        if (asgs.isEmpty()) {
            LOGGER.error(String.format("Not found ASG with name %s", asgName));
            return null;
        } else {
            return asgs.get(0).getAvailabilityZones();
        }
    }

    /**
     * Gets the list of availability zones for a load balancer. Can be overridden in subclasses.
     * @param region the region
     * @param lbName the load balancer name.
     * @return the list of the availability zones that the load balancer has, or null when not found.
     */
    protected List<String> getAvailabilityZonesForLoadBalancer(String region, String lbName) {
        List<LoadBalancerDescription> lbs =
                getAwsClient(region).describeElasticLoadBalancers(lbName);
        if (lbs.isEmpty()) {
            LOGGER.error(String.format("Not found load balancer with name %s", lbName));
            return null;
        } else {
            return lbs.get(0).getAvailabilityZones();
        }
    }

    /** Lazily creates and caches an AWS client for the region. */
    private AWSClient getAwsClient(String region) {
        AWSClient awsClient = regionToAwsClient.get(region);
        if (awsClient == null) {
            awsClient = new AWSClient(region, awsCredentialsProvider);
            regionToAwsClient.put(region, awsClient);
        }
        return awsClient;
    }

    /**
     * Returns true when the two zone lists contain the same zones. A null list
     * (zone lookup failed) is treated as conforming.
     */
    private boolean haveSameZones(List<String> zones1, List<String> zones2) {
        if (zones1 == null || zones2 == null) {
            return true;
        }
        // BUG FIX: this previously compared zones1.size() against itself, so the
        // size check was a no-op and lists with duplicate zones could wrongly
        // compare as equal.
        if (zones1.size() != zones2.size()) {
            return false;
        }
        for (String zone : zones1) {
            if (!zones2.contains(zone)) {
                return false;
            }
        }
        for (String zone : zones2) {
            if (!zones1.contains(zone)) {
                return false;
            }
        }
        return true;
    }
}
| 4,830 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceTooOld.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * The class implementing a conformity rule that checks if there are instances that are older than certain days.
 * Instances are not considered to be permanent in the cloud, so sometimes having too old instances could indicate
 * potential issues.
 */
public class InstanceTooOld implements ConformityRule {
    // BUG FIX: the logger was previously bound to InstanceHasStatusUrl.class
    // (copy-paste), misattributing this rule's log lines.
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceTooOld.class);

    private static final String RULE_NAME = "InstanceTooOld";

    /** Human-readable nonconforming reason, built from the threshold. */
    private final String reason;

    /** The age in days beyond which an instance is considered too old. */
    private final int instanceAgeThreshold;

    private AWSCredentialsProvider awsCredentialsProvider;

    /**
     * Constructor.
     * @param instanceAgeThreshold
     *          The age in days that makes an instance be considered too old.
     */
    public InstanceTooOld(int instanceAgeThreshold) {
        this(new DefaultAWSCredentialsProviderChain(), instanceAgeThreshold);
    }

    /**
     * Constructor.
     * @param awsCredentialsProvider
     *          The AWS credentials provider
     * @param instanceAgeThreshold
     *          The age in days that makes an instance be considered too old, must be positive.
     */
    public InstanceTooOld(AWSCredentialsProvider awsCredentialsProvider, int instanceAgeThreshold) {
        this.awsCredentialsProvider = awsCredentialsProvider;
        Validate.isTrue(instanceAgeThreshold > 0);
        this.instanceAgeThreshold = instanceAgeThreshold;
        this.reason = String.format("Instances are older than %d days", instanceAgeThreshold);
    }

    /**
     * Flags every instance in the cluster whose launch time is earlier than
     * now minus the configured threshold.
     */
    @Override
    public Conformity check(Cluster cluster) {
        List<String> instanceIds = Lists.newArrayList();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            instanceIds.addAll(asg.getInstances());
        }
        Map<String, Long> instanceIdToLaunchTime = getInstanceLaunchTimes(
                cluster.getRegion(), instanceIds.toArray(new String[instanceIds.size()]));
        Collection<String> failedComponents = Lists.newArrayList();
        // Instances launched before this point in time are too old.
        long creationTimeThreshold = DateTime.now().minusDays(instanceAgeThreshold).getMillis();
        for (Map.Entry<String, Long> entry : instanceIdToLaunchTime.entrySet()) {
            String instanceId = entry.getKey();
            if (creationTimeThreshold > entry.getValue()) {
                LOGGER.info(String.format("Instance %s was created more than %d days ago",
                        instanceId, instanceAgeThreshold));
                failedComponents.add(instanceId);
            }
        }
        return new Conformity(getName(), failedComponents);
    }

    @Override
    public String getName() {
        return RULE_NAME;
    }

    @Override
    public String getNonconformingReason() {
        return reason;
    }

    /**
     * Gets the launch time (in milliseconds) for a list of instance ids of the same region. The default
     * implementation is using an AWS client. The method can be overridden in subclasses to get the instance
     * launch times differently.
     * @param region
     *      the region of the instances
     * @param instanceIds
     *      the instance ids, all instances should be in the same region.
     * @return
     *      the map from instance id to the launch time in milliseconds
     */
    protected Map<String, Long> getInstanceLaunchTimes(String region, String... instanceIds) {
        Map<String, Long> result = Maps.newHashMap();
        if (instanceIds == null || instanceIds.length == 0) {
            return result;
        }
        AWSClient awsClient = new AWSClient(region, awsCredentialsProvider);
        for (Instance instance : awsClient.describeInstances(instanceIds)) {
            if (instance.getLaunchTime() != null) {
                result.put(instance.getInstanceId(), instance.getLaunchTime().getTime());
            } else {
                // Instances without a launch time cannot be evaluated and are skipped.
                LOGGER.warn(String.format("No launch time found for instance %s", instance.getInstanceId()));
            }
        }
        return result;
    }
}
| 4,831 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceHasStatusUrl.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
/**
 * Conformity rule verifying that every instance belonging to a cluster's non-suspended
 * auto scaling groups registers a status page url with Discovery/Eureka.
 */
public class InstanceHasStatusUrl implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceHasStatusUrl.class);

    private static final String RULE_NAME = "InstanceHasStatusUrl";
    private static final String REASON = "Status url not defined";

    private final ConformityEurekaClient conformityEurekaClient;

    /**
     * Constructor.
     * @param conformityEurekaClient
     *      the client used to query Discovery/Eureka for instance registration details
     */
    public InstanceHasStatusUrl(ConformityEurekaClient conformityEurekaClient) {
        Validate.notNull(conformityEurekaClient);
        this.conformityEurekaClient = conformityEurekaClient;
    }

    @Override
    public Conformity check(Cluster cluster) {
        Collection<String> missingStatusUrl = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            if (asg.isSuspended()) {
                // Suspended ASGs are intentionally inactive, so their instances are not checked.
                continue;
            }
            for (String instanceId : asg.getInstances()) {
                if (conformityEurekaClient.hasStatusUrl(region, instanceId)) {
                    continue;
                }
                LOGGER.info(String.format("Instance %s does not have a status page url in discovery.",
                        instanceId));
                missingStatusUrl.add(instanceId);
            }
        }
        return new Conformity(getName(), missingStatusUrl);
    }

    @Override
    public String getName() {
        return RULE_NAME;
    }

    @Override
    public String getNonconformingReason() {
        return REASON;
    }
}
| 4,832 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceInVPC.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* The class implements a conformity rule to check an instance is in a virtual private cloud.
*/
public class InstanceInVPC implements ConformityRule {
private static final Logger LOGGER = LoggerFactory.getLogger(InstanceInVPC.class);
private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();
private AWSCredentialsProvider awsCredentialsProvider;
private static final String RULE_NAME = "InstanceInVPC";
private static final String REASON = "VPC_ID not defined";
/**
* Constructs an instance with the default AWS credentials provider chain.
* @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain
*/
public InstanceInVPC() {
this(new DefaultAWSCredentialsProviderChain());
}
/**
* Constructs an instance with the passed AWS credentials provider.
* @param awsCredentialsProvider
* The AWS credentials provider
*/
public InstanceInVPC(AWSCredentialsProvider awsCredentialsProvider) {
this.awsCredentialsProvider = awsCredentialsProvider;
}
@Override
public Conformity check(Cluster cluster) {
Collection<String> failedComponents = Lists.newArrayList();
//check all instances
Set<String> failedInstances = checkInstancesInVPC(cluster.getRegion(), cluster.getSoloInstances());
failedComponents.addAll(failedInstances);
//check asg instances
for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
if (asg.isSuspended()) {
continue;
}
Set<String> asgFailedInstances = checkInstancesInVPC(cluster.getRegion(), asg.getInstances());
failedComponents.addAll(asgFailedInstances);
}
return new Conformity(getName(), failedComponents);
}
@Override
public String getName() {
return RULE_NAME;
}
@Override
public String getNonconformingReason() {
return REASON;
}
private AWSClient getAwsClient(String region) {
AWSClient awsClient = regionToAwsClient.get(region);
if (awsClient == null) {
awsClient = new AWSClient(region, awsCredentialsProvider);
regionToAwsClient.put(region, awsClient);
}
return awsClient;
}
private Set<String> checkInstancesInVPC(String region, Collection<String> instances) {
Set<String> failedInstances = Sets.newHashSet();
for (String instanceId : instances) {
for (Instance awsInstance : getAWSInstances(region, instanceId)) {
if (awsInstance.getVpcId() == null) {
LOGGER.info(String.format("Instance %s is not in a virtual private cloud", instanceId));
failedInstances.add(instanceId);
}
}
}
return failedInstances;
}
/**
* Gets the list of AWS instances. Can be overridden
* @param region the region
* @param instanceId the instance id.
* @return the list of the AWS instances with the given id.
*/
protected List<Instance> getAWSInstances(String region, String instanceId) {
AWSClient awsClient = getAwsClient(region);
return awsClient.describeInstances(instanceId);
}
} | 4,833 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceHasHealthCheckUrl.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
/**
 * Conformity rule verifying that every instance belonging to a cluster's non-suspended
 * auto scaling groups registers a health check url with Discovery/Eureka.
 */
public class InstanceHasHealthCheckUrl implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceHasHealthCheckUrl.class);

    private static final String RULE_NAME = "InstanceHasHealthCheckUrl";
    private static final String REASON = "Health check url not defined";

    private final ConformityEurekaClient conformityEurekaClient;

    /**
     * Constructor.
     * @param conformityEurekaClient
     *      the client used to query Discovery/Eureka for instance registration details
     */
    public InstanceHasHealthCheckUrl(ConformityEurekaClient conformityEurekaClient) {
        Validate.notNull(conformityEurekaClient);
        this.conformityEurekaClient = conformityEurekaClient;
    }

    @Override
    public Conformity check(Cluster cluster) {
        Collection<String> missingHealthCheckUrl = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            if (asg.isSuspended()) {
                // Suspended ASGs are intentionally inactive, so their instances are not checked.
                continue;
            }
            for (String instanceId : asg.getInstances()) {
                if (conformityEurekaClient.hasHealthCheckUrl(region, instanceId)) {
                    continue;
                }
                LOGGER.info(String.format("Instance %s does not have health check url in discovery.",
                        instanceId));
                missingHealthCheckUrl.add(instanceId);
            }
        }
        return new Conformity(getName(), missingHealthCheckUrl);
    }

    @Override
    public String getName() {
        return RULE_NAME;
    }

    @Override
    public String getNonconformingReason() {
        return REASON;
    }
}
| 4,834 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/CrossZoneLoadBalancing.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import com.netflix.simianarmy.client.MonkeyRestClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerAttributes;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
/**
 * Conformity rule verifying that cross-zone load balancing is enabled on every ELB
 * attached to the cluster's auto scaling groups.
 */
public class CrossZoneLoadBalancing implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(CrossZoneLoadBalancing.class);

    /** One cached AWS client per region, created lazily. */
    private final Map<String, AWSClient> regionToAwsClient = Maps.newHashMap();

    private AWSCredentialsProvider awsCredentialsProvider;

    private static final String RULE_NAME = "CrossZoneLoadBalancing";
    private static final String REASON = "Cross-zone load balancing is disabled";

    /**
     * Constructs an instance with the default AWS credentials provider chain.
     * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain
     */
    public CrossZoneLoadBalancing() {
        this(new DefaultAWSCredentialsProviderChain());
    }

    /**
     * Constructs an instance with the passed AWS Credential Provider.
     * @param awsCredentialsProvider
     */
    public CrossZoneLoadBalancing(AWSCredentialsProvider awsCredentialsProvider) {
        this.awsCredentialsProvider = awsCredentialsProvider;
    }

    @Override
    public Conformity check(Cluster cluster) {
        // NOTE(review): unlike sibling rules, suspended ASGs are not skipped here --
        // confirm whether that is intentional (their ELBs may still exist and serve traffic).
        Collection<String> failedComponents = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            try {
                for (String lbName : getLoadBalancerNamesForAsg(region, asg.getName())) {
                    if (isCrossZoneLoadBalancingEnabled(region, lbName)) {
                        continue;
                    }
                    LOGGER.info(String.format("ELB %s in %s does not have cross-zone load balancing enabled",
                            lbName, region));
                    failedComponents.add(lbName);
                }
            } catch (MonkeyRestClient.DataReadException e) {
                // A transient read failure must not mark the cluster nonconforming.
                LOGGER.error(String.format("Transient error reading ELB for %s in %s - skipping this check",
                        asg.getName(), region), e);
            }
        }
        return new Conformity(getName(), failedComponents);
    }

    /**
     * Gets the cross-zone load balancing option for an ELB. Can be overridden in subclasses.
     * @param region the region
     * @param lbName the ELB name
     * @return {@code true} if cross-zone load balancing is enabled
     */
    protected boolean isCrossZoneLoadBalancingEnabled(String region, String lbName) {
        LoadBalancerAttributes elbAttributes = getAwsClient(region).describeElasticLoadBalancerAttributes(lbName);
        return elbAttributes.getCrossZoneLoadBalancing().isEnabled();
    }

    @Override
    public String getName() {
        return RULE_NAME;
    }

    @Override
    public String getNonconformingReason() {
        return REASON;
    }

    /**
     * Gets the load balancer names of an ASG. Can be overridden in subclasses.
     * @param region the region
     * @param asgName the ASG name
     * @return the list of load balancer names, empty when the ASG cannot be found
     */
    protected List<String> getLoadBalancerNamesForAsg(String region, String asgName) {
        List<com.amazonaws.services.autoscaling.model.AutoScalingGroup> awsAsgs =
                getAwsClient(region).describeAutoScalingGroups(asgName);
        if (awsAsgs.isEmpty()) {
            LOGGER.error(String.format("Not found ASG with name %s", asgName));
            return Collections.emptyList();
        }
        return awsAsgs.get(0).getLoadBalancerNames();
    }

    /** Lazily creates and caches one AWSClient per region. */
    private AWSClient getAwsClient(String region) {
        AWSClient client = regionToAwsClient.get(region);
        if (client == null) {
            client = new AWSClient(region, awsCredentialsProvider);
            regionToAwsClient.put(region, client);
        }
        return client;
    }
}
| 4,835 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/ConformityEurekaClient.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
/**
 * The interface for a client to access Eureka service to get the status of instances for Conformity Monkey.
 *
 * <p>Every method takes the instance's region explicitly, so a single implementation can
 * serve lookups across multiple regions.</p>
 */
public interface ConformityEurekaClient {
    /**
     * Checks whether an instance has a health check url registered in Eureka.
     * @param region the region of the instance
     * @param instanceId the instance id
     * @return true if the instance has health check url in Eureka, false otherwise.
     */
    boolean hasHealthCheckUrl(String region, String instanceId);
    /**
     * Checks whether an instance has a status page url registered in Eureka.
     * @param region the region of the instance
     * @param instanceId the instance id
     * @return true if the instance has status url in Eureka, false otherwise.
     */
    boolean hasStatusUrl(String region, String instanceId);
    /**
     * Checks whether an instance is reported healthy (status 'UP') in Eureka.
     * @param region the region of the instance
     * @param instanceId the instance id
     * @return true if the instance is healthy in Eureka, false otherwise.
     */
    boolean isHealthy(String region, String instanceId);
}
| 4,836 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceInSecurityGroup.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.ec2.model.GroupIdentifier;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * The class implementing a conformity rule that checks whether or not all instances in a cluster are in
 * specific security groups.
 */
public class InstanceInSecurityGroup implements ConformityRule {
    // Bug fix: the logger was previously obtained for InstanceHasStatusUrl.class, which made
    // this rule's log lines appear to come from the wrong rule.
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceInSecurityGroup.class);

    private static final String RULE_NAME = "InstanceInSecurityGroup";

    /** The nonconforming reason; built once in the constructor from the required group names. */
    private final String reason;

    /** The security group names every checked instance must belong to. */
    private final Collection<String> requiredSecurityGroupNames = Sets.newHashSet();

    private final AWSCredentialsProvider awsCredentialsProvider;

    /**
     * Constructor.
     * @param requiredSecurityGroupNames
     *      The security group names that are required to have for every instance of a cluster.
     */
    public InstanceInSecurityGroup(String... requiredSecurityGroupNames) {
        this(new DefaultAWSCredentialsProviderChain(), requiredSecurityGroupNames);
    }

    /**
     * Constructor.
     * @param awsCredentialsProvider
     *      The AWS credentials provider
     * @param requiredSecurityGroupNames
     *      The security group names that are required to have for every instance of a cluster.
     */
    public InstanceInSecurityGroup(AWSCredentialsProvider awsCredentialsProvider,
            String... requiredSecurityGroupNames) {
        this.awsCredentialsProvider = awsCredentialsProvider;
        Validate.notNull(requiredSecurityGroupNames);
        for (String sgName : requiredSecurityGroupNames) {
            Validate.notNull(sgName);
            this.requiredSecurityGroupNames.add(sgName.trim());
        }
        this.reason = String.format("Instances are not part of security groups (%s)",
                StringUtils.join(this.requiredSecurityGroupNames, ","));
    }

    @Override
    public Conformity check(Cluster cluster) {
        // Collect every instance of every ASG; solo instances are not covered by this rule.
        List<String> instanceIds = Lists.newArrayList();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            instanceIds.addAll(asg.getInstances());
        }
        Collection<String> failedComponents = Lists.newArrayList();
        if (!instanceIds.isEmpty()) {
            Map<String, List<String>> instanceIdToSecurityGroup = getInstanceSecurityGroups(
                    cluster.getRegion(), instanceIds.toArray(new String[instanceIds.size()]));
            for (Map.Entry<String, List<String>> entry : instanceIdToSecurityGroup.entrySet()) {
                String instanceId = entry.getKey();
                if (!checkSecurityGroups(entry.getValue())) {
                    LOGGER.info(String.format("Instance %s does not have all required security groups", instanceId));
                    failedComponents.add(instanceId);
                }
            }
        }
        return new Conformity(getName(), failedComponents);
    }

    @Override
    public String getName() {
        return RULE_NAME;
    }

    @Override
    public String getNonconformingReason() {
        return reason;
    }

    /**
     * Checks whether the collection of security group names are valid. The default implementation here is to check
     * whether the security groups contain the required security groups. The method can be overridden for different
     * rules.
     * @param sgNames
     *      The collection of security group names
     * @return
     *      true if the security group names are valid, false otherwise.
     */
    protected boolean checkSecurityGroups(Collection<String> sgNames) {
        for (String requiredSg : requiredSecurityGroupNames) {
            if (!sgNames.contains(requiredSg)) {
                LOGGER.info(String.format("Required security group %s is not found.", requiredSg));
                return false;
            }
        }
        return true;
    }

    /**
     * Gets the security groups for a list of instance ids of the same region. The default implementation
     * is using an AWS client. The method can be overridden in subclasses to get the security groups differently.
     * @param region
     *      the region of the instances
     * @param instanceIds
     *      the instance ids, all instances should be in the same region.
     * @return
     *      the map from instance id to the list of security group names the instance has
     */
    protected Map<String, List<String>> getInstanceSecurityGroups(String region, String... instanceIds) {
        Map<String, List<String>> result = Maps.newHashMap();
        if (instanceIds == null || instanceIds.length == 0) {
            return result;
        }
        AWSClient awsClient = new AWSClient(region, awsCredentialsProvider);
        for (Instance instance : awsClient.describeInstances(instanceIds)) {
            // Ignore instances that are in VPC
            if (StringUtils.isNotEmpty(instance.getVpcId())) {
                LOGGER.info(String.format("Instance %s is in VPC and is ignored.", instance.getInstanceId()));
                continue;
            }
            // Only running instances report meaningful security group membership here.
            if (!"running".equals(instance.getState().getName())) {
                LOGGER.info(String.format("Instance %s is not running, state is %s.",
                        instance.getInstanceId(), instance.getState().getName()));
                continue;
            }
            List<String> sgs = Lists.newArrayList();
            for (GroupIdentifier groupId : instance.getSecurityGroups()) {
                sgs.add(groupId.getGroupName());
            }
            result.put(instance.getInstanceId(), sgs);
        }
        return result;
    }
}
| 4,837 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/conformity/rule/InstanceIsHealthyInEureka.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.conformity.rule;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.conformity.AutoScalingGroup;
import com.netflix.simianarmy.conformity.Cluster;
import com.netflix.simianarmy.conformity.Conformity;
import com.netflix.simianarmy.conformity.ConformityRule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
/**
 * Conformity rule verifying that every instance in a cluster's non-suspended auto
 * scaling groups is reported healthy ('UP') by Discovery/Eureka.
 */
public class InstanceIsHealthyInEureka implements ConformityRule {
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceIsHealthyInEureka.class);

    private static final String RULE_NAME = "InstanceIsHealthyInEureka";
    private static final String REASON = "Instances are not 'UP' in Eureka.";

    private final ConformityEurekaClient conformityEurekaClient;

    /**
     * Constructor.
     * @param conformityEurekaClient
     *      the client used to query Discovery/Eureka for instance health
     */
    public InstanceIsHealthyInEureka(ConformityEurekaClient conformityEurekaClient) {
        Validate.notNull(conformityEurekaClient);
        this.conformityEurekaClient = conformityEurekaClient;
    }

    @Override
    public Conformity check(Cluster cluster) {
        Collection<String> unhealthyInstances = Lists.newArrayList();
        String region = cluster.getRegion();
        for (AutoScalingGroup asg : cluster.getAutoScalingGroups()) {
            if (asg.isSuspended()) {
                // Suspended ASGs are intentionally inactive and excluded from the check.
                LOGGER.info(String.format("ASG %s is suspended, ignore.", asg.getName()));
                continue;
            }
            for (String instanceId : asg.getInstances()) {
                if (conformityEurekaClient.isHealthy(region, instanceId)) {
                    continue;
                }
                LOGGER.info(String.format("Instance %s is not healthy in Eureka.", instanceId));
                unhealthyInstances.add(instanceId);
            }
        }
        return new Conformity(getName(), unhealthyInstances);
    }

    @Override
    public String getName() {
        return RULE_NAME;
    }

    @Override
    public String getNonconformingReason() {
        return REASON;
    }
}
| 4,838 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/EBSVolumeJanitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for EBS volume cleanup.
 */
public class EBSVolumeJanitor extends AbstractJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSVolumeJanitor.class);
    /** Client used to issue the actual volume deletion. */
    private final AWSClient awsClient;
    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public EBSVolumeJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.EBS_VOLUME);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }
    /** No post-mark processing is needed for EBS volumes. */
    @Override
    protected void postMark(Resource resource) {
    }
    /** Deletes the marked EBS volume via the AWS client. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting EBS volume %s", resource.getId()));
        awsClient.deleteVolume(resource.getId());
    }
    /** No post-cleanup processing is needed for EBS volumes. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 4,839 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/ImageJanitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The Janitor responsible for AMI (image) cleanup.
 * (Previous javadoc said "launch configuration cleanup" -- a copy-paste error;
 * this class deletes images.)
 */
public class ImageJanitor extends AbstractJanitor {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ImageJanitor.class);
    /** Client used to issue the actual image deletion. */
    private final AWSClient awsClient;
    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public ImageJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.IMAGE);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }
    /** No post-mark processing is needed for images. */
    @Override
    protected void postMark(Resource resource) {
    }
    /** Deletes the marked image via the AWS client. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting image %s", resource.getId()));
        awsClient.deleteImage(resource.getId());
    }
    /** No post-cleanup processing is needed for images. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 4,840 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/SimpleDBJanitorResourceTracker.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.amazonaws.services.simpledb.AmazonSimpleDB;
import com.amazonaws.services.simpledb.model.*;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.Resource.CleanupState;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorResourceTracker;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* The JanitorResourceTracker implementation in SimpleDB.
*/
public class SimpleDBJanitorResourceTracker implements JanitorResourceTracker {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleDBJanitorResourceTracker.class);
    /** The SimpleDB domain in which janitor resources are persisted. */
    private final String domain;
    /** The SimpleDB client obtained from the AWS client at construction time. */
    private final AmazonSimpleDB simpleDBClient;
    /**
     * Instantiates a new simple db resource tracker.
     *
     * @param awsClient
     *            the AWS Client used to obtain the SimpleDB client
     * @param domain
     *            the SimpleDB domain used to store resources
     */
    public SimpleDBJanitorResourceTracker(AWSClient awsClient, String domain) {
        this.domain = domain;
        this.simpleDBClient = awsClient.sdbClient();
    }
    /**
     * Gets the SimpleDB client.
     * @return the SimpleDB client
     */
    protected AmazonSimpleDB getSimpleDBClient() {
        return simpleDBClient;
    }
/** {@inheritDoc} */
@Override
public void addOrUpdate(Resource resource) {
List<ReplaceableAttribute> attrs = new ArrayList<ReplaceableAttribute>();
Map<String, String> fieldToValueMap = resource.getFieldToValueMap();
for (Map.Entry<String, String> entry : fieldToValueMap.entrySet()) {
attrs.add(new ReplaceableAttribute(entry.getKey(), entry.getValue(), true));
}
PutAttributesRequest putReqest = new PutAttributesRequest(domain, getSimpleDBItemName(resource), attrs);
LOGGER.debug(String.format("Saving resource %s to SimpleDB domain %s",
resource.getId(), domain));
this.simpleDBClient.putAttributes(putReqest);
LOGGER.debug("Successfully saved.");
}
/**
* Returns a list of AWSResource objects. You need to override this method if more
* specific resource types (e.g. subtypes of AWSResource) need to be obtained from
* the SimpleDB.
*/
@Override
public List<Resource> getResources(ResourceType resourceType, CleanupState state, String resourceRegion) {
Validate.notEmpty(resourceRegion);
List<Resource> resources = new ArrayList<Resource>();
StringBuilder query = new StringBuilder();
query.append(String.format("select * from `%s` where ", domain));
if (resourceType != null) {
query.append(String.format("resourceType='%s' and ", resourceType));
}
if (state != null) {
query.append(String.format("state='%s' and ", state));
}
query.append(String.format("region='%s'", resourceRegion));
LOGGER.debug(String.format("Query is '%s'", query));
List<Item> items = querySimpleDBItems(query.toString());
for (Item item : items) {
try {
resources.add(parseResource(item));
} catch (Exception e) {
// Ignore the item that cannot be parsed.
LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a resource.", item));
}
}
LOGGER.info(String.format("Retrieved %d resources from SimpleDB in domain %s for resource type %s"
+ " and state %s and region %s",
resources.size(), domain, resourceType, state, resourceRegion));
return resources;
}
@Override
public Resource getResource(String resourceId) {
Validate.notEmpty(resourceId);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from `%s` where resourceId = '%s'", domain, resourceId));
LOGGER.debug(String.format("Query is '%s'", query));
List<Item> items = querySimpleDBItems(query.toString());
Validate.isTrue(items.size() <= 1);
if (items.size() == 0) {
LOGGER.info(String.format("Not found resource with id %s", resourceId));
return null;
} else {
Resource resource = null;
try {
resource = parseResource(items.get(0));
} catch (Exception e) {
// Ignore the item that cannot be parsed.
LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a resource.", items.get(0)));
}
return resource;
}
}
@Override
public Resource getResource(String resourceId, String region) {
Validate.notEmpty(resourceId);
Validate.notEmpty(region);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from `%s` where resourceId = '%s' and region = '%s'", domain, resourceId, region));
LOGGER.debug(String.format("Query is '%s'", query));
List<Item> items = querySimpleDBItems(query.toString());
Validate.isTrue(items.size() <= 1);
if (items.size() == 0) {
LOGGER.info(String.format("Not found resource with id %s and region %s", resourceId, region));
return null;
} else {
Resource resource = null;
try {
resource = parseResource(items.get(0));
} catch (Exception e) {
// Ignore the item that cannot be parsed.
LOGGER.error(String.format("SimpleDB item %s cannot be parsed into a resource.", items.get(0)));
}
return resource;
}
}
/**
* Parses a SimpleDB item into an AWS resource.
* @param item the item from SimpleDB
* @return the AWSResource object for the SimpleDB item
*/
protected Resource parseResource(Item item) {
Map<String, String> fieldToValue = new HashMap<String, String>();
for (Attribute attr : item.getAttributes()) {
String name = attr.getName();
String value = attr.getValue();
if (name != null && value != null) {
fieldToValue.put(name, value);
}
}
return AWSResource.parseFieldtoValueMap(fieldToValue);
}
/**
* Gets the unique SimpleDB item name for a resource. The subclass can override this
* method to generate the item name differently.
* @param resource
* @return the SimpleDB item name for the resource
*/
protected String getSimpleDBItemName(Resource resource) {
return String.format("%s-%s-%s", resource.getResourceType().name(), resource.getId(), resource.getRegion());
}
private List<Item> querySimpleDBItems(String query) {
Validate.notNull(query);
String nextToken = null;
List<Item> items = new ArrayList<Item>();
do {
SelectRequest request = new SelectRequest(query);
request.setNextToken(nextToken);
request.setConsistentRead(Boolean.TRUE);
SelectResult result = this.simpleDBClient.select(request);
items.addAll(result.getItems());
nextToken = result.getNextToken();
} while (nextToken != null);
return items;
}
}
| 4,841 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/EBSSnapshotJanitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for EBS snapshot cleanup.
 */
public class EBSSnapshotJanitor extends AbstractJanitor {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSSnapshotJanitor.class);

    /** Client used to issue the snapshot deletion calls. */
    private final AWSClient client;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public EBSSnapshotJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.EBS_SNAPSHOT);
        Validate.notNull(awsClient);
        this.client = awsClient;
    }

    /** No extra work is needed after marking a snapshot. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Deletes the EBS snapshot identified by the resource id. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting EBS snapshot %s", resource.getId()));
        client.deleteSnapshot(resource.getId());
    }

    /** No extra work is needed after deleting a snapshot. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 4,842 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/LaunchConfigJanitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The Janitor responsible for launch configuration cleanup.
 */
public class LaunchConfigJanitor extends AbstractJanitor {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(LaunchConfigJanitor.class);

    /** Client used to delete launch configurations. */
    private final AWSClient client;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public LaunchConfigJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.LAUNCH_CONFIG);
        Validate.notNull(awsClient);
        this.client = awsClient;
    }

    /** No extra work is needed after marking a launch configuration. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Deletes the launch configuration identified by the resource id. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting launch configuration %s", resource.getId()));
        client.deleteLaunchConfiguration(resource.getId());
    }

    /** No extra work is needed after deleting a launch configuration. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 4,843 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/VolumeTaggingMonkey.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.Tag;
import com.amazonaws.services.ec2.model.Volume;
import com.amazonaws.services.ec2.model.VolumeAttachment;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.MonkeyType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorMonkey;
/**
 * A companion monkey of Janitor Monkey for tagging EBS volumes with the last attachment information.
 * In many scenarios, EBS volumes generated by applications remain unattached to instances. Amazon
 * does not keep track of last unattached time, which makes it difficult to determine its usage.
 * To solve this, this monkey will tag all EBS volumes with last owner and instance to which they are attached
 * and the time they got detached from instance. The monkey will poll and monitor EBS volumes hourly (by default).
 *
 */
public class VolumeTaggingMonkey extends Monkey {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(VolumeTaggingMonkey.class);
    /**
     * The Interface Context.
     */
    public interface Context extends Monkey.Context {
        /**
         * Configuration.
         *
         * @return the monkey configuration
         */
        @Override
        MonkeyConfiguration configuration();
        /**
         * AWS clients. Using a collection of clients for supporting running one monkey for multiple regions.
         *
         * @return the collection of AWS clients
         */
        Collection<AWSClient> awsClients();
    }
    /** The monkey configuration, used to read the enabled/leashed/email-domain properties. */
    private final MonkeyConfiguration config;
    /** One AWS client per region this monkey covers. */
    private final Collection<AWSClient> awsClients;
    /** Calendar used to obtain "now" when recording detach times. */
    private final MonkeyCalendar calendar;
    /** We cache the global map from instance id to its owner when starting the monkey. */
    private final Map<AWSClient, Map<String, String>> awsClientToInstanceToOwner;
    /**
     * The constructor. Eagerly builds the per-region cache of instance id to owner
     * by reading the owner tag of every instance visible to each client.
     * @param ctx the context
     */
    public VolumeTaggingMonkey(Context ctx) {
        super(ctx);
        this.config = ctx.configuration();
        this.awsClients = ctx.awsClients();
        this.calendar = ctx.calendar();
        awsClientToInstanceToOwner = Maps.newHashMap();
        for (AWSClient awsClient : awsClients) {
            Map<String, String> instanceToOwner = Maps.newHashMap();
            awsClientToInstanceToOwner.put(awsClient, instanceToOwner);
            for (Instance instance : awsClient.describeInstances()) {
                for (Tag tag : instance.getTags()) {
                    // Only the global owner tag is cached; all other tags are ignored here.
                    if (tag.getKey().equals(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY)) {
                        instanceToOwner.put(instance.getInstanceId(), tag.getValue());
                    }
                }
            }
        }
    }
    /**
     * The monkey Type.
     */
    public enum Type implements MonkeyType {
        /** Volume tagging monkey. */
        VOLUME_TAGGING
    }
    /**
     * The event types that this monkey causes.
     */
    public enum EventTypes implements EventType {
        /** The event type for tagging the volume with Janitor meta data information. */
        TAGGING_JANITOR
    }
    @Override
    public Type type() {
        return Type.VOLUME_TAGGING;
    }
    @Override
    public void doMonkeyBusiness() {
        // The monkey is a no-op unless explicitly enabled via configuration.
        String prop = "simianarmy.volumeTagging.enabled";
        if (config.getBoolOrElse(prop, false)) {
            for (AWSClient awsClient : awsClients) {
                tagVolumesWithLatestAttachment(awsClient);
            }
        } else {
            LOGGER.info(String.format("Volume tagging monkey is not enabled. You can set %s to true to enable it.",
                    prop));
        }
    }
    /**
     * Examines every volume in the client's region and, where the attachment
     * state changed, updates the Janitor meta tag with the latest owner,
     * attached instance, and last-detach time.
     * @param awsClient the client for the region being processed
     */
    private void tagVolumesWithLatestAttachment(AWSClient awsClient) {
        List<Volume> volumes = awsClient.describeVolumes();
        LOGGER.info(String.format("Trying to tag %d volumes for Janitor Monkey meta data.",
                volumes.size()));
        Date now = calendar.now().getTime();
        for (Volume volume : volumes) {
            String owner = null, instanceId = null;
            Date lastDetachTime = null;
            List<VolumeAttachment> attachments = volume.getAttachments();
            List<Tag> tags = volume.getTags();
            // The volume can have a special tag if it does not want to be changed/tagged
            // by Janitor monkey.
            if ("donotmark".equals(getTagValue(JanitorMonkey.JANITOR_TAG, tags))) {
                LOGGER.info(String.format("The volume %s is tagged as not handled by Janitor",
                        volume.getVolumeId()));
                continue;
            }
            Map<String, String> janitorMetadata = parseJanitorTag(tags);
            // finding the instance attached most recently.
            VolumeAttachment latest = null;
            for (VolumeAttachment attachment : attachments) {
                if (latest == null || latest.getAttachTime().before(attachment.getAttachTime())) {
                    latest = attachment;
                }
            }
            if (latest != null) {
                instanceId = latest.getInstanceId();
                owner = getOwnerEmail(instanceId, janitorMetadata, tags, awsClient);
            }
            if (latest == null || "detached".equals(latest.getState())) {
                if (janitorMetadata.get(JanitorMonkey.DETACH_TIME_TAG_KEY) == null) {
                    // There is no attached instance and the last detached time is not set.
                    // Use the current time as the last detached time.
                    LOGGER.info(String.format("Setting the last detached time to %s for volume %s",
                            now, volume.getVolumeId()));
                    lastDetachTime = now;
                } else {
                    LOGGER.debug(String.format("The volume %s was already marked as detached at time %s",
                            volume.getVolumeId(), janitorMetadata.get(JanitorMonkey.DETACH_TIME_TAG_KEY)));
                }
            } else {
                // The volume is currently attached to an instance
                lastDetachTime = null;
            }
            String existingOwner = janitorMetadata.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
            if (owner == null && existingOwner != null) {
                // Save the current owner in the tag when we are not able to find a owner.
                owner = existingOwner;
            }
            if (needsUpdate(janitorMetadata, owner, instanceId, lastDetachTime)) {
                Event evt = updateJanitorMetaTag(volume, instanceId, owner, lastDetachTime, awsClient);
                if (evt != null) {
                    context().recorder().recordEvent(evt);
                }
            }
        }
    }
    /**
     * Resolves the owner email for a volume, trying in order: the cached owner of
     * the attached instance, the existing Janitor meta tag, and the volume's own
     * owner tag. A bare user name is expanded with the configured email domain.
     * @param instanceId the id of the last attached instance
     * @param janitorMetadata the parsed Janitor meta tag of the volume
     * @param tags the volume's tags
     * @param awsClient the client for the region of the volume
     * @return the owner email, or null if no owner could be determined
     */
    private String getOwnerEmail(String instanceId, Map<String, String> janitorMetadata,
            List<Tag> tags, AWSClient awsClient) {
        // The owner of the volume is set as the owner of the last instance attached to it.
        String owner = awsClientToInstanceToOwner.get(awsClient).get(instanceId);
        if (owner == null) {
            owner = janitorMetadata.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        }
        if (owner == null) {
            owner = getTagValue(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY, tags);
        }
        String emailDomain = getOwnerEmailDomain();
        if (owner != null && !owner.contains("@")
                && StringUtils.isNotBlank(emailDomain)) {
            owner = String.format("%s@%s", owner, emailDomain);
        }
        return owner;
    }
    /**
     * Parses the Janitor meta tag set by this monkey and gets a map from key
     * to value for the tag values.
     * @param tags the tags of the volumes
     * @return the map from the Janitor meta tag key to value
     */
    private static Map<String, String> parseJanitorTag(List<Tag> tags) {
        String janitorTag = getTagValue(JanitorMonkey.JANITOR_META_TAG, tags);
        return parseJanitorMetaTag(janitorTag);
    }
    /**
     * Parses the string of Janitor meta-data tag value to get a key value map.
     * The tag format is "k1=v1;k2=v2;..."; malformed entries are silently skipped.
     * @param janitorMetaTag the value of the Janitor meta-data tag (may be null)
     * @return the key value map in the Janitor meta-data tag
     */
    public static Map<String, String> parseJanitorMetaTag(String janitorMetaTag) {
        Map<String, String> metadata = new HashMap<String, String>();
        if (janitorMetaTag != null) {
            for (String keyValue : janitorMetaTag.split(";")) {
                String[] meta = keyValue.split("=");
                if (meta.length == 2) {
                    metadata.put(meta[0], meta[1]);
                }
            }
        }
        return metadata;
    }
    /** Gets the domain name for the owner email. The method can be overridden in subclasses.
     *
     * @return the domain name for the owner email.
     */
    protected String getOwnerEmailDomain() {
        return config.getStrOrElse("simianarmy.volumeTagging.ownerEmailDomain", "");
    }
    /**
     * Writes the Janitor meta tag to the volume (unless the monkey is leashed)
     * and creates a corresponding event.
     * @param volume the volume to tag
     * @param instance the id of the last attached instance
     * @param owner the owner email
     * @param lastDetachTime the last detach time, or null if currently attached
     * @param awsClient the client for the region of the volume
     * @return the recorded event, or null when leashed or when tagging failed
     */
    private Event updateJanitorMetaTag(Volume volume, String instance, String owner, Date lastDetachTime,
            AWSClient awsClient) {
        String meta = makeMetaTag(instance, owner, lastDetachTime);
        Map<String, String> janitorTags = new HashMap<String, String>();
        janitorTags.put(JanitorMonkey.JANITOR_META_TAG, meta);
        LOGGER.info(String.format("Setting tag %s to '%s' for volume %s",
                JanitorMonkey.JANITOR_META_TAG, meta, volume.getVolumeId()));
        String prop = "simianarmy.volumeTagging.leashed";
        Event evt = null;
        // Leashed by default: the tag change is only applied when explicitly unleashed.
        if (config.getBoolOrElse(prop, true)) {
            LOGGER.info("Volume tagging monkey is leashed. No real change is made to the volume.");
        } else {
            try {
                awsClient.createTagsForResources(janitorTags, volume.getVolumeId());
                evt = context().recorder().newEvent(type(), EventTypes.TAGGING_JANITOR,
                        awsClient.region(), volume.getVolumeId());
                evt.addField(JanitorMonkey.JANITOR_META_TAG, meta);
            } catch (Exception e) {
                // Best-effort: a failure to tag one volume does not stop the run.
                LOGGER.error(String.format("Failed to update the tag for volume %s", volume.getVolumeId()));
            }
        }
        return evt;
    }
    /**
     * Makes the Janitor meta tag for volumes to track the last attachment/detachment information.
     * The method is intentionally made public for testing.
     * @param instance the last attached instance
     * @param owner the last owner
     * @param lastDetachTime the detach time
     * @return the meta tag of Janitor Monkey
     */
    public static String makeMetaTag(String instance, String owner, Date lastDetachTime) {
        StringBuilder meta = new StringBuilder();
        meta.append(String.format("%s=%s;",
                JanitorMonkey.INSTANCE_TAG_KEY, instance == null ? "" : instance));
        meta.append(String.format("%s=%s;", BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY, owner == null ? "" : owner));
        meta.append(String.format("%s=%s", JanitorMonkey.DETACH_TIME_TAG_KEY,
                lastDetachTime == null ? "" : AWSResource.DATE_FORMATTER.print(lastDetachTime.getTime())));
        return meta.toString();
    }
    /**
     * Finds the value of a tag by key.
     * @param key the tag key to look up
     * @param tags the tags to search
     * @return the tag value, or null when the key is absent
     */
    private static String getTagValue(String key, List<Tag> tags) {
        for (Tag tag : tags) {
            if (tag.getKey().equals(key)) {
                return tag.getValue();
            }
        }
        return null;
    }
    /** Needs to update tags of the volume if
     * 1) owner or instance attached changed or
     * 2) the last detached status is changed.
     */
    private static boolean needsUpdate(Map<String, String> metadata,
            String owner, String instance, Date lastDetachTime) {
        return (owner != null && !StringUtils.equals(metadata.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY), owner))
                || (instance != null && !StringUtils.equals(metadata.get(JanitorMonkey.INSTANCE_TAG_KEY), instance))
                || lastDetachTime != null;
    }
}
| 4,844 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/InstanceJanitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for auto scaling instance cleanup.
 */
public class InstanceJanitor extends AbstractJanitor {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceJanitor.class);

    /** Client used to terminate instances. */
    private final AWSClient client;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public InstanceJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.INSTANCE);
        Validate.notNull(awsClient);
        this.client = awsClient;
    }

    /** No extra work is needed after marking an instance. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Terminates the instance identified by the resource id. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Terminating instance %s", resource.getId()));
        client.terminateInstance(resource.getId());
    }

    /** No extra work is needed after terminating an instance. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 4,845 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/ASGJanitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
/**
 * The Janitor responsible for ASG cleanup.
 */
public class ASGJanitor extends AbstractJanitor {

    /** The Constant LOGGER. Bound to ASGJanitor.class (was AbstractJanitor.class),
     *  so log lines from this janitor are attributed to the correct class, consistent
     *  with the other janitor implementations. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ASGJanitor.class);

    /** Client used to delete auto scaling groups. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public ASGJanitor(AWSClient awsClient, AbstractJanitor.Context ctx) {
        super(ctx, AWSResourceType.ASG);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** No extra work is needed after marking an ASG. */
    @Override
    protected void postMark(Resource resource) {
    }

    /** Deletes the auto scaling group identified by the resource id. */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting ASG %s", resource.getId()));
        awsClient.deleteAutoScalingGroup(resource.getId());
    }

    /** No extra work is needed after deleting an ASG. */
    @Override
    protected void postCleanup(Resource resource) {
    }
}
| 4,846 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/ELBJanitor.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.AbstractJanitor;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The Janitor responsible for elastic load balancer cleanup.
 */
public class ELBJanitor extends AbstractJanitor {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ELBJanitor.class);

    /** Client used to delete load balancers and their DNS records. */
    private final AWSClient awsClient;

    /**
     * Constructor.
     * @param awsClient the AWS client
     * @param ctx the context
     */
    public ELBJanitor(AWSClient awsClient, Context ctx) {
        super(ctx, AWSResourceType.ELB);
        Validate.notNull(awsClient);
        this.awsClient = awsClient;
    }

    /** No extra work is needed after marking an ELB. */
    @Override
    protected void postMark(Resource resource) {
    }

    /**
     * Deletes the ELB and any DNS records referencing it. The referenced DNS
     * names, types, and zones are parallel comma-separated lists stored in the
     * resource's additional fields; the counts must agree, otherwise the DNS
     * cleanup is aborted (the ELB itself has already been deleted by then).
     */
    @Override
    protected void cleanup(Resource resource) {
        LOGGER.info(String.format("Deleting ELB %s", resource.getId()));
        awsClient.deleteElasticLoadBalancer(resource.getId());
        // delete any DNS records attached to this ELB
        String dnsNames = resource.getAdditionalField("referencedDNS");
        String dnsTypes = resource.getAdditionalField("referencedDNSTypes");
        String dnsZones = resource.getAdditionalField("referencedDNSZones");
        if (StringUtils.isNotBlank(dnsNames) && StringUtils.isNotBlank(dnsTypes) && StringUtils.isNotBlank(dnsZones)) {
            String[] dnsNamesSplit = StringUtils.split(dnsNames, ',');
            String[] dnsTypesSplit = StringUtils.split(dnsTypes, ',');
            String[] dnsZonesSplit = StringUtils.split(dnsZones, ',');
            // BUG FIX: the format arguments were previously passed to LOGGER.error() outside
            // of String.format(...), so String.format("...%s") with no arguments threw
            // MissingFormatArgumentException at runtime. The arguments now live inside the call.
            if (dnsNamesSplit.length != dnsTypesSplit.length) {
                LOGGER.error(String.format(
                        "DNS Name count does not match DNS Type count, aborting DNS delete for ELB %s",
                        resource.getId()));
                LOGGER.error(String.format("DNS Names found but not deleted: %s for ELB %s",
                        dnsNames, resource.getId()));
                return;
            }
            if (dnsNamesSplit.length != dnsZonesSplit.length) {
                LOGGER.error(String.format(
                        "DNS Name count does not match DNS Zone count, aborting DNS delete for ELB %s",
                        resource.getId()));
                LOGGER.error(String.format("DNS Names found but not deleted: %s for ELB %s",
                        dnsNames, resource.getId()));
                return;
            }
            for (int i = 0; i < dnsNamesSplit.length; i++) {
                LOGGER.info(String.format("Deleting DNS Record %s for ELB %s of type %s in zone %s",
                        dnsNamesSplit[i], resource.getId(), dnsTypesSplit[i], dnsZonesSplit[i]));
                awsClient.deleteDNSRecord(dnsNamesSplit[i], dnsTypesSplit[i], dnsZonesSplit[i]);
            }
        }
    }

    /**
     * Pauses briefly after cleanup. NOTE(review): the 5s sleep presumably gives
     * AWS time to propagate the deletion before the next janitor action — confirm.
     */
    @Override
    protected void postCleanup(Resource resource) {
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            LOGGER.warn("Post-cleanup sleep was interrupted", e);
        }
    }
}
| 4,847 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/RDSJanitorResourceTracker.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor;
import com.amazonaws.AmazonClientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.Resource.CleanupState;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.janitor.JanitorResourceTracker;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.*;
/**
* The JanitorResourceTracker implementation in AWS RDS.
*/
public class RDSJanitorResourceTracker implements JanitorResourceTracker {
/** The Constant LOGGER. */
public static final Logger LOGGER = LoggerFactory.getLogger(RDSJanitorResourceTracker.class);
/** The table. */
private final String table;
/** the jdbcTemplate */
JdbcTemplate jdbcTemplate = null;
/**
* Instantiates a new RDS janitor resource tracker.
*
*/
public RDSJanitorResourceTracker(String dbDriver, String dbUser,
String dbPass, String dbUrl, String dbTable) {
HikariDataSource dataSource = new HikariDataSource();
dataSource.setDriverClassName(dbDriver);
dataSource.setJdbcUrl(dbUrl);
dataSource.setUsername(dbUser);
dataSource.setPassword(dbPass);
dataSource.setMaximumPoolSize(2);
this.jdbcTemplate = new JdbcTemplate(dataSource);
this.table = dbTable;
}
    /**
     * Instantiates a new RDS janitor resource tracker. This constructor is intended
     * for unit testing.
     *
     * @param jdbcTemplate a pre-built (possibly mocked) JDBC template
     * @param table the table that stores tracked resources
     */
    public RDSJanitorResourceTracker(JdbcTemplate jdbcTemplate, String table) {
        this.jdbcTemplate = jdbcTemplate;
        this.table = table;
    }
    /**
     * Gets the JDBC template backing this tracker.
     *
     * @return the JDBC template
     */
    public JdbcTemplate getJdbcTemplate() {
        return jdbcTemplate;
    }
    /**
     * Converts a possibly-null string into a bind value.
     * NOTE(review): java.sql.Types.NULL is the int constant 0, so a null here is
     * bound as the integer 0 rather than SQL NULL — emailValue() compensates by
     * treating the literal "0" as absent. Confirm before changing this behavior.
     *
     * @param value the value, possibly null
     * @return the value itself, or Types.NULL (0) when null
     */
    public Object value(String value) {
        return value == null ? Types.NULL : value;
    }
    /**
     * Converts a possibly-null date into a bind value (epoch milliseconds).
     * NOTE(review): as with value(String), a null date is bound as Types.NULL,
     * which is the int 0, not SQL NULL — confirm this is the intended storage.
     *
     * @param value the date, possibly null
     * @return the date's epoch milliseconds, or Types.NULL (0) when null
     */
    public Object value(Date value) {
        return value == null ? Types.NULL : value.getTime();
    }
public Object value(boolean value) {
return new Boolean(value).toString();
}
public Object emailValue(String email) {
if (StringUtils.isBlank(email)) return Types.NULL;
if (email.equals("0")) return Types.NULL;
return email;
}
    /** {@inheritDoc}
     * Performs an upsert: inserts a new row when the (id, region) pair is not yet
     * tracked, otherwise updates the existing row. All additional fields are
     * serialized into a single JSON column; if that serialization fails, the save
     * is skipped entirely (logged as an error).
     */
    @Override
    public void addOrUpdate(Resource resource) {
        // Decide between insert and update by probing for an existing row.
        Resource orig = getResource(resource.getId(), resource.getRegion());
        LOGGER.debug(String.format("Saving resource %s to RDB table %s in region %s", resource.getId(), table, resource.getRegion()));
        String json;
        try {
            json = new ObjectMapper().writeValueAsString(additionalFieldsAsMap(resource));
        } catch (JsonProcessingException e) {
            // Abort the save rather than persist a row with lost additional fields.
            LOGGER.error("ERROR generating additional field JSON when saving resource " + resource.getId(), e);
            return;
        }
        if (orig == null) {
            // Insert path: the column list below must stay in the same order as the
            // bind arguments passed to jdbcTemplate.update() further down.
            StringBuilder sb = new StringBuilder();
            sb.append("insert into ").append(table);
            sb.append(" (");
            sb.append(AWSResource.FIELD_RESOURCE_ID).append(",");
            sb.append(AWSResource.FIELD_RESOURCE_TYPE).append(",");
            sb.append(AWSResource.FIELD_REGION).append(",");
            sb.append(AWSResource.FIELD_OWNER_EMAIL).append(",");
            sb.append(AWSResource.FIELD_DESCRIPTION).append(",");
            sb.append(AWSResource.FIELD_STATE).append(",");
            sb.append(AWSResource.FIELD_TERMINATION_REASON).append(",");
            sb.append(AWSResource.FIELD_EXPECTED_TERMINATION_TIME).append(",");
            sb.append(AWSResource.FIELD_ACTUAL_TERMINATION_TIME).append(",");
            sb.append(AWSResource.FIELD_NOTIFICATION_TIME).append(",");
            sb.append(AWSResource.FIELD_LAUNCH_TIME).append(",");
            sb.append(AWSResource.FIELD_MARK_TIME).append(",");
            sb.append(AWSResource.FIELD_OPT_OUT_OF_JANITOR).append(",");
            sb.append("additionalFields").append(") values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)");
            LOGGER.debug(String.format("Insert statement is '%s'", sb));
            int updated = this.jdbcTemplate.update(sb.toString(),
                    resource.getId(),
                    value(resource.getResourceType().toString()),
                    value(resource.getRegion()),
                    emailValue(resource.getOwnerEmail()),
                    value(resource.getDescription()),
                    value(resource.getState().toString()),
                    value(resource.getTerminationReason()),
                    value(resource.getExpectedTerminationTime()),
                    value(resource.getActualTerminationTime()),
                    value(resource.getNotificationTime()),
                    value(resource.getLaunchTime()),
                    value(resource.getMarkTime()),
                    value(resource.isOptOutOfJanitor()),
                    json);
            LOGGER.debug(String.format("%d rows inserted", updated));
        } else {
            // Update path: keyed by (resourceId, region); the set-column order must
            // match the bind arguments below, with the key values appended last.
            StringBuilder sb = new StringBuilder();
            sb.append("update ").append(table).append(" set ");
            sb.append(AWSResource.FIELD_RESOURCE_TYPE).append("=?,");
            sb.append(AWSResource.FIELD_REGION).append("=?,");
            sb.append(AWSResource.FIELD_OWNER_EMAIL).append("=?,");
            sb.append(AWSResource.FIELD_DESCRIPTION).append("=?,");
            sb.append(AWSResource.FIELD_STATE).append("=?,");
            sb.append(AWSResource.FIELD_TERMINATION_REASON).append("=?,");
            sb.append(AWSResource.FIELD_EXPECTED_TERMINATION_TIME).append("=?,");
            sb.append(AWSResource.FIELD_ACTUAL_TERMINATION_TIME).append("=?,");
            sb.append(AWSResource.FIELD_NOTIFICATION_TIME).append("=?,");
            sb.append(AWSResource.FIELD_LAUNCH_TIME).append("=?,");
            sb.append(AWSResource.FIELD_MARK_TIME).append("=?,");
            sb.append(AWSResource.FIELD_OPT_OUT_OF_JANITOR).append("=?,");
            sb.append("additionalFields").append("=? where ");
            sb.append(AWSResource.FIELD_RESOURCE_ID).append("=? and ");
            sb.append(AWSResource.FIELD_REGION).append("=?");
            LOGGER.debug(String.format("Update statement is '%s'", sb));
            int updated = this.jdbcTemplate.update(sb.toString(),
                    resource.getResourceType().toString(),
                    value(resource.getRegion()),
                    emailValue(resource.getOwnerEmail()),
                    value(resource.getDescription()),
                    value(resource.getState().toString()),
                    value(resource.getTerminationReason()),
                    value(resource.getExpectedTerminationTime()),
                    value(resource.getActualTerminationTime()),
                    value(resource.getNotificationTime()),
                    value(resource.getLaunchTime()),
                    value(resource.getMarkTime()),
                    value(resource.isOptOutOfJanitor()),
                    json,
                    resource.getId(),
                    resource.getRegion());
            LOGGER.debug(String.format("%d rows updated", updated));
        }
        LOGGER.debug("Successfully saved.");
    }
/**
* Returns a list of AWSResource objects. You need to override this method if more
* specific resource types (e.g. subtypes of AWSResource) need to be obtained from
* the Database.
*/
@Override
public List<Resource> getResources(ResourceType resourceType, CleanupState state, String resourceRegion) {
Validate.notEmpty(resourceRegion);
StringBuilder query = new StringBuilder();
ArrayList<String> args = new ArrayList<>();
query.append(String.format("select * from %s where ", table));
if (resourceType != null) {
query.append("resourceType=? and ");
args.add(resourceType.toString());
}
if (state != null) {
query.append("state=? and ");
args.add(state.toString());
}
query.append("region=?");
args.add(resourceRegion);
LOGGER.debug(String.format("Query is '%s'", query));
List<Resource> resources = jdbcTemplate.query(query.toString(), args.toArray(), new RowMapper<Resource>() {
public Resource mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
return resources;
}
private Resource mapResource(ResultSet rs) throws SQLException {
String json = rs.getString("additionalFields");
Resource resource = null;
try {
// put additional fields
Map<String, String> map = new HashMap<>();
if (json != null) {
TypeReference<HashMap<String,String>> typeRef = new TypeReference<HashMap<String,String>>() {};
map = new ObjectMapper().readValue(json, typeRef);
}
// put everything else
map.put(AWSResource.FIELD_RESOURCE_ID, rs.getString(AWSResource.FIELD_RESOURCE_ID));
map.put(AWSResource.FIELD_RESOURCE_TYPE, rs.getString(AWSResource.FIELD_RESOURCE_TYPE));
map.put(AWSResource.FIELD_REGION, rs.getString(AWSResource.FIELD_REGION));
map.put(AWSResource.FIELD_DESCRIPTION, rs.getString(AWSResource.FIELD_DESCRIPTION));
map.put(AWSResource.FIELD_STATE, rs.getString(AWSResource.FIELD_STATE));
map.put(AWSResource.FIELD_TERMINATION_REASON, rs.getString(AWSResource.FIELD_TERMINATION_REASON));
map.put(AWSResource.FIELD_OPT_OUT_OF_JANITOR, rs.getString(AWSResource.FIELD_OPT_OUT_OF_JANITOR));
String email = rs.getString(AWSResource.FIELD_OWNER_EMAIL);
if (StringUtils.isBlank(email) || email.equals("0")) {
email = null;
}
map.put(AWSResource.FIELD_OWNER_EMAIL, email);
String expectedTerminationTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_EXPECTED_TERMINATION_TIME));
String actualTerminationTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_ACTUAL_TERMINATION_TIME));
String notificationTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_NOTIFICATION_TIME));
String launchTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_LAUNCH_TIME));
String markTime = millisToFormattedDate(rs.getString(AWSResource.FIELD_MARK_TIME));
if (expectedTerminationTime != null) {
map.put(AWSResource.FIELD_EXPECTED_TERMINATION_TIME, expectedTerminationTime);
}
if (actualTerminationTime != null) {
map.put(AWSResource.FIELD_ACTUAL_TERMINATION_TIME, actualTerminationTime);
}
if (notificationTime != null) {
map.put(AWSResource.FIELD_NOTIFICATION_TIME, notificationTime);
}
if (launchTime != null) {
map.put(AWSResource.FIELD_LAUNCH_TIME, launchTime);
}
if (markTime != null) {
map.put(AWSResource.FIELD_MARK_TIME, markTime);
}
resource = AWSResource.parseFieldtoValueMap(map);
}catch(IOException ie) {
String msg = "Error parsing resource from result set";
LOGGER.error(msg, ie);
throw new SQLException(msg);
}
return resource;
}
private String millisToFormattedDate(String millisStr) {
String datetime = null;
try {
long millis = Long.parseLong(millisStr);
datetime = AWSResource.DATE_FORMATTER.print(millis);
} catch(NumberFormatException nfe) {
LOGGER.error(String.format("Error parsing datetime %s when reading from RDS", millisStr));
}
return datetime;
}
@Override
public Resource getResource(String resourceId) {
Validate.notEmpty(resourceId);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from %s where resourceId=?", table));
LOGGER.debug(String.format("Query is '%s'", query));
List<Resource> resources = jdbcTemplate.query(query.toString(), new String[]{resourceId}, new RowMapper<Resource>() {
public Resource mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
Resource resource = null;
Validate.isTrue(resources.size() <= 1);
if (resources.size() == 0) {
LOGGER.info(String.format("Not found resource with id %s", resourceId));
} else {
resource = resources.get(0);
}
return resource;
}
@Override
public Resource getResource(String resourceId, String region) {
Validate.notEmpty(resourceId);
Validate.notEmpty(region);
StringBuilder query = new StringBuilder();
query.append(String.format("select * from %s where resourceId=? and region=?", table));
LOGGER.debug(String.format("Query is '%s'", query));
List<Resource> resources = jdbcTemplate.query(query.toString(), new String[]{resourceId,region}, new RowMapper<Resource>() {
public Resource mapRow(ResultSet rs, int rowNum) throws SQLException {
return mapResource(rs);
}
});
Resource resource = null;
Validate.isTrue(resources.size() <= 1);
if (resources.size() == 0) {
LOGGER.info(String.format("Not found resource with id %s", resourceId));
} else {
resource = resources.get(0);
}
return resource;
}
    /**
     * Creates the RDS table, if it does not already exist.
     */
    public void init() {
        try {
            LOGGER.info("Creating RDS table: {}", table);
            // Column layout mirrors the fields written by the save path above: timestamp
            // columns are stored as epoch millis (BIGINT) and the free-form fields as a
            // JSON blob in "additionalFields".
            String sql = String.format("create table if not exists %s ("
                                       + " %s varchar(255), "
                                       + " %s varchar(255), "
                                       + " %s varchar(25), "
                                       + " %s varchar(255), "
                                       + " %s varchar(255), "
                                       + " %s varchar(25), "
                                       + " %s varchar(255), "
                                       + " %s BIGINT, "
                                       + " %s BIGINT, "
                                       + " %s BIGINT, "
                                       + " %s BIGINT, "
                                       + " %s BIGINT, "
                                       + " %s varchar(8), "
                                       + " %s varchar(4096) )",
                                       table,
                                       AWSResource.FIELD_RESOURCE_ID,
                                       AWSResource.FIELD_RESOURCE_TYPE,
                                       AWSResource.FIELD_REGION,
                                       AWSResource.FIELD_OWNER_EMAIL,
                                       AWSResource.FIELD_DESCRIPTION,
                                       AWSResource.FIELD_STATE,
                                       AWSResource.FIELD_TERMINATION_REASON,
                                       AWSResource.FIELD_EXPECTED_TERMINATION_TIME,
                                       AWSResource.FIELD_ACTUAL_TERMINATION_TIME,
                                       AWSResource.FIELD_NOTIFICATION_TIME,
                                       AWSResource.FIELD_LAUNCH_TIME,
                                       AWSResource.FIELD_MARK_TIME,
                                       AWSResource.FIELD_OPT_OUT_OF_JANITOR,
                                       "additionalFields");
            LOGGER.debug("Create SQL is: '{}'", sql);
            jdbcTemplate.execute(sql);
        } catch (AmazonClientException e) {
            // NOTE(review): jdbcTemplate.execute throws Spring's DataAccessException, not
            // AmazonClientException, so this handler likely never fires and a DDL failure
            // would propagate instead -- confirm the intended exception type.
            LOGGER.warn("Error while trying to auto-create RDS table", e);
        }
    }
private HashMap<String, String> additionalFieldsAsMap(Resource resource) {
HashMap<String, String> fields = new HashMap<>();
for(String key : resource.getAdditionalFieldNames()) {
fields.put(key, resource.getAdditionalField(key));
}
return fields;
}
}
| 4,848 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/LaunchConfigJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.LaunchConfiguration;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
/**
 * The crawler to crawl AWS launch configurations for janitor monkey.
 */
public class LaunchConfigJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** Additional field name of the flag indicating whether the launch config
     * is used by an auto scaling group. */
    public static final String LAUNCH_CONFIG_FIELD_USED_BY_ASG = "USED_BY_ASG";

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(LaunchConfigJanitorCrawler.class);

    /**
     * Instantiates a new basic launch configuration crawler.
     * @param awsClient the aws client
     */
    public LaunchConfigJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.LAUNCH_CONFIG);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if (!"LAUNCH_CONFIG".equals(resourceType.name())) {
            return Collections.emptyList();
        }
        return getLaunchConfigResources();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getLaunchConfigResources(resourceIds);
    }

    /**
     * Crawls launch configurations (optionally restricted to the given names) and flags
     * each one with whether any auto scaling group currently references it.
     */
    private List<Resource> getLaunchConfigResources(String... launchConfigNames) {
        AWSClient client = getAWSClient();
        // Names of launch configurations referenced by at least one ASG.
        Set<String> configsInUse = Sets.newHashSet();
        for (AutoScalingGroup group : client.describeAutoScalingGroups()) {
            configsInUse.add(group.getLaunchConfigurationName());
        }
        List<Resource> crawled = Lists.newArrayList();
        for (LaunchConfiguration config : client.describeLaunchConfigurations(launchConfigNames)) {
            String name = config.getLaunchConfigurationName();
            Resource res = new AWSResource().withId(name)
                    .withRegion(client.region())
                    .withResourceType(AWSResourceType.LAUNCH_CONFIG)
                    .withLaunchTime(config.getCreatedTime());
            res.setOwnerEmail(getOwnerEmailForResource(res));
            res.setAdditionalField(LAUNCH_CONFIG_FIELD_USED_BY_ASG, String.valueOf(configsInUse.contains(name)));
            crawled.add(res);
        }
        return crawled;
    }
}
| 4,849 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/AbstractAWSJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.apache.commons.lang.Validate;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
/**
 * The abstract class for crawler of AWS resources.
 */
public abstract class AbstractAWSJanitorCrawler implements JanitorCrawler {

    /** Client used to talk to AWS for this crawler's region. */
    private final AWSClient client;

    /**
     * The constructor.
     * @param awsClient the AWS client used by the crawler; must not be null.
     */
    public AbstractAWSJanitorCrawler(AWSClient awsClient) {
        Validate.notNull(awsClient);
        this.client = awsClient;
    }

    /**
     * Gets the AWS client used by the crawler.
     * @return the AWS client used by the crawler.
     */
    protected AWSClient getAWSClient() {
        return client;
    }

    /**
     * Gets the owner email from the resource's tag key set in GLOBAL_OWNER_TAGKEY.
     * @param resource the resource
     * @return the owner email specified in the resource's tags
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }
}
| 4,850 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/EBSSnapshotJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.ec2.model.BlockDeviceMapping;
import com.amazonaws.services.ec2.model.EbsBlockDevice;
import com.amazonaws.services.ec2.model.Image;
import com.amazonaws.services.ec2.model.Snapshot;
import com.amazonaws.services.ec2.model.Tag;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
 * The crawler to crawl AWS EBS snapshots for janitor monkey.
 */
public class EBSSnapshotJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSSnapshotJanitorCrawler.class);

    /** The name representing the additional field name of AMIs generated using the snapshot. */
    public static final String SNAPSHOT_FIELD_AMIS = "AMIs";

    /** Maps a snapshot id to the ids of every AMI created from it; rebuilt on each crawl. */
    private final Map<String, Collection<String>> snapshotToAmiIds =
            new HashMap<String, Collection<String>>();

    /**
     * The constructor.
     * @param awsClient the AWS client
     */
    public EBSSnapshotJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_SNAPSHOT);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if (!"EBS_SNAPSHOT".equals(resourceType.name())) {
            return Collections.emptyList();
        }
        return getSnapshotResources();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getSnapshotResources(resourceIds);
    }

    /**
     * Crawls snapshots (optionally restricted to the given ids) into AWSResources,
     * attaching the list of AMIs built from each snapshot as an additional field.
     */
    private List<Resource> getSnapshotResources(String... snapshotIds) {
        refreshSnapshotToAMIs();
        AWSClient client = getAWSClient();
        List<Resource> crawled = new LinkedList<Resource>();
        for (Snapshot snapshot : client.describeSnapshots(snapshotIds)) {
            Resource res = new AWSResource().withId(snapshot.getSnapshotId())
                    .withRegion(client.region()).withResourceType(AWSResourceType.EBS_SNAPSHOT)
                    .withLaunchTime(snapshot.getStartTime()).withDescription(snapshot.getDescription());
            for (Tag tag : snapshot.getTags()) {
                LOGGER.debug(String.format("Adding tag %s = %s to resource %s",
                        tag.getKey(), tag.getValue(), res.getId()));
                res.setTag(tag.getKey(), tag.getValue());
            }
            res.setOwnerEmail(getOwnerEmailForResource(res));
            ((AWSResource) res).setAWSResourceState(snapshot.getState());
            Collection<String> amis = snapshotToAmiIds.get(res.getId());
            if (amis != null) {
                res.setAdditionalField(SNAPSHOT_FIELD_AMIS, StringUtils.join(amis, ","));
            }
            crawled.add(res);
        }
        return crawled;
    }

    /**
     * Looks up the owner email: the standard owner tag first, then the superclass lookup.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        String owner = resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        return (owner != null) ? owner : super.getOwnerEmailForResource(resource);
    }

    /**
     * Gets the collection of AMIs that are created using a specific snapshot.
     * @param snapshotId the snapshot id
     */
    protected Collection<String> getAMIsForSnapshot(String snapshotId) {
        Collection<String> amis = snapshotToAmiIds.get(snapshotId);
        return (amis == null)
                ? Collections.<String>emptyList()
                : Collections.unmodifiableCollection(amis);
    }

    /** Rebuilds the snapshot-id to AMI-ids index from the account's images. */
    private void refreshSnapshotToAMIs() {
        snapshotToAmiIds.clear();
        for (Image image : getAWSClient().describeImages()) {
            for (BlockDeviceMapping mapping : image.getBlockDeviceMappings()) {
                EbsBlockDevice device = mapping.getEbs();
                if (device == null || device.getSnapshotId() == null) {
                    continue;
                }
                LOGGER.debug(String.format("Snapshot %s is used to generate AMI %s",
                        device.getSnapshotId(), image.getImageId()));
                Collection<String> amis = snapshotToAmiIds.get(device.getSnapshotId());
                if (amis == null) {
                    amis = new ArrayList<String>();
                    snapshotToAmiIds.put(device.getSnapshotId(), amis);
                }
                amis.add(image.getImageId());
            }
        }
    }
}
| 4,851 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/InstanceJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.autoscaling.model.AutoScalingInstanceDetails;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.Tag;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
 * The crawler to crawl AWS instances for janitor monkey.
 */
public class InstanceJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The name representing the additional field name of ASG's name. */
    public static final String INSTANCE_FIELD_ASG_NAME = "ASG_NAME";

    /** The name representing the additional field name of the OpsWork stack name. */
    public static final String INSTANCE_FIELD_OPSWORKS_STACK_NAME = "OPSWORKS_STACK_NAME";

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(InstanceJanitorCrawler.class);

    /**
     * Instantiates a new basic instance crawler.
     * @param awsClient the aws client
     */
    public InstanceJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.INSTANCE);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if (!"INSTANCE".equals(resourceType.name())) {
            return Collections.emptyList();
        }
        return getInstanceResources();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getInstanceResources(resourceIds);
    }

    /**
     * Crawls instances (optionally restricted to the given ids) into AWSResources,
     * attaching ASG and OpsWorks stack names when they can be determined.
     */
    private List<Resource> getInstanceResources(String... instanceIds) {
        AWSClient client = getAWSClient();
        // Index the AutoScaling view of the instances so ASG names can be recovered for
        // instances missing the autoscaling group tag.
        Map<String, AutoScalingInstanceDetails> detailsById = new HashMap<String, AutoScalingInstanceDetails>();
        for (AutoScalingInstanceDetails details : client.describeAutoScalingInstances(instanceIds)) {
            detailsById.put(details.getInstanceId(), details);
        }
        List<Resource> crawled = new LinkedList<Resource>();
        for (Instance instance : client.describeInstances(instanceIds)) {
            Resource res = new AWSResource().withId(instance.getInstanceId())
                    .withRegion(client.region()).withResourceType(AWSResourceType.INSTANCE)
                    .withLaunchTime(instance.getLaunchTime());
            for (Tag tag : instance.getTags()) {
                res.setTag(tag.getKey(), tag.getValue());
            }
            String host = instance.getPublicDnsName() == null ? "" : instance.getPublicDnsName();
            res.setDescription(String.format("type=%s; host=%s", instance.getInstanceType(), host));
            res.setOwnerEmail(getOwnerEmailForResource(res));
            String asgName = getAsgName(res, detailsById);
            if (asgName != null) {
                res.setAdditionalField(INSTANCE_FIELD_ASG_NAME, asgName);
                LOGGER.info(String.format("instance %s has a ASG tag name %s.", res.getId(), asgName));
            }
            String stackName = getOpsWorksStackName(res);
            if (stackName != null) {
                res.setAdditionalField(INSTANCE_FIELD_OPSWORKS_STACK_NAME, stackName);
                LOGGER.info(String.format("instance %s is part of an OpsWorks stack named %s.", res.getId(), stackName));
            }
            if (instance.getState() != null) {
                ((AWSResource) res).setAWSResourceState(instance.getState().getName());
            }
            crawled.add(res);
        }
        return crawled;
    }

    /**
     * Resolves the ASG the instance belongs to: the autoscaling group tag first,
     * falling back to the AutoScaling service's record for the instance.
     */
    private String getAsgName(Resource instanceResource, Map<String, AutoScalingInstanceDetails> idToASGInstance) {
        String asgName = instanceResource.getTag("aws:autoscaling:groupName");
        if (asgName != null) {
            return asgName;
        }
        // At most times the aws:autoscaling:groupName tag has the ASG name, but there
        // are cases where the instance is not correctly tagged and we can find the ASG
        // name from the AutoScaling service.
        AutoScalingInstanceDetails details = idToASGInstance.get(instanceResource.getId());
        return (details == null) ? null : details.getAutoScalingGroupName();
    }

    /** Returns the OpsWorks stack name tag value, or null when absent. */
    private String getOpsWorksStackName(Resource instanceResource) {
        return instanceResource.getTag("opsworks:stack");
    }
}
| 4,852 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/EBSVolumeJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.Collections;
import java.util.EnumSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.ec2.model.Tag;
import com.amazonaws.services.ec2.model.Volume;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.aws.janitor.VolumeTaggingMonkey;
import com.netflix.simianarmy.client.aws.AWSClient;
import com.netflix.simianarmy.janitor.JanitorMonkey;
/**
 * The crawler to crawl AWS EBS volumes for janitor monkey.
 */
public class EBSVolumeJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EBSVolumeJanitorCrawler.class);

    /**
     * The constructor.
     * @param awsClient the AWS client
     */
    public EBSVolumeJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_VOLUME);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if (!"EBS_VOLUME".equals(resourceType.name())) {
            return Collections.emptyList();
        }
        return getVolumeResources();
    }

    @Override
    public List<Resource> resources(String... resourceIds) {
        return getVolumeResources(resourceIds);
    }

    /** Converts each crawled EBS volume (optionally restricted by id) into an AWSResource. */
    private List<Resource> getVolumeResources(String... volumeIds) {
        AWSClient client = getAWSClient();
        List<Resource> crawled = new LinkedList<Resource>();
        for (Volume volume : client.describeVolumes(volumeIds)) {
            Resource res = new AWSResource().withId(volume.getVolumeId())
                    .withRegion(client.region()).withResourceType(AWSResourceType.EBS_VOLUME)
                    .withLaunchTime(volume.getCreateTime());
            for (Tag tag : volume.getTags()) {
                LOGGER.info(String.format("Adding tag %s = %s to resource %s",
                        tag.getKey(), tag.getValue(), res.getId()));
                res.setTag(tag.getKey(), tag.getValue());
            }
            res.setOwnerEmail(getOwnerEmailForResource(res));
            res.setDescription(getVolumeDescription(volume));
            ((AWSResource) res).setAWSResourceState(volume.getState());
            crawled.add(res);
        }
        return crawled;
    }

    /** Builds a human-readable description: the size followed by every tag pair. */
    private String getVolumeDescription(Volume volume) {
        StringBuilder sb = new StringBuilder();
        Integer size = volume.getSize();
        sb.append(String.format("size=%s", size == null ? "unknown" : size));
        for (Tag tag : volume.getTags()) {
            sb.append(String.format("; %s=%s", tag.getKey(), tag.getValue()));
        }
        return sb.toString();
    }

    /**
     * Looks up the owner email: the standard owner tag first, then the Janitor
     * metadata tag written by the volume tagging monkey.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        String owner = super.getOwnerEmailForResource(resource);
        if (owner != null) {
            return owner;
        }
        // try to find the owner from Janitor Metadata tag set by the volume tagging monkey.
        Map<String, String> janitorTag = VolumeTaggingMonkey.parseJanitorMetaTag(resource.getTag(
                JanitorMonkey.JANITOR_META_TAG));
        return janitorTag.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }
}
| 4,853 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/ASGJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.autoscaling.model.LaunchConfiguration;
import com.amazonaws.services.autoscaling.model.SuspendedProcess;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
/**
 * The crawler to crawl AWS auto scaling groups for janitor monkey.
 */
public class ASGJanitorCrawler extends AbstractAWSJanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ASGJanitorCrawler.class);

    /** The name representing the additional field name of instance ids. */
    public static final String ASG_FIELD_INSTANCES = "INSTANCES";

    /** The name representing the additional field name of max ASG size. */
    public static final String ASG_FIELD_MAX_SIZE = "MAX_SIZE";

    /** The name representing the additional field name of ELB names. */
    public static final String ASG_FIELD_ELBS = "ELBS";

    /** The name representing the additional field name of launch configuration name. */
    public static final String ASG_FIELD_LC_NAME = "LAUNCH_CONFIGURATION_NAME";

    /** The name representing the additional field name of launch configuration creation time. */
    public static final String ASG_FIELD_LC_CREATION_TIME = "LAUNCH_CONFIGURATION_CREATION_TIME";

    /** The name representing the additional field name of ASG suspension time from ELB. */
    public static final String ASG_FIELD_SUSPENSION_TIME = "ASG_SUSPENSION_TIME";

    // Cache of launch configuration name -> launch configuration, repopulated at the
    // start of each call to getASGResources().
    // NOTE(review): the map is never cleared, so entries for launch configs deleted
    // between crawls linger here -- confirm whether that is intended.
    private final Map<String, LaunchConfiguration> nameToLaunchConfig = new HashMap<String, LaunchConfiguration>();

    /** The regular expression pattern below is for the termination reason added by AWS when
     * an ASG is suspended from ELB's traffic. Group 1 captures the timestamp portion.
     */
    private static final Pattern SUSPENSION_REASON_PATTERN =
            Pattern.compile("User suspended at (\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}).*");

    /** The date format used to print or parse the suspension time value. **/
    public static final DateTimeFormatter SUSPENSION_TIME_FORMATTER =
            DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss");

    /**
     * Instantiates a new basic ASG crawler.
     * @param awsClient
     *            the aws client
     */
    public ASGJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }

    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ASG);
    }

    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ASG".equals(resourceType.name())) {
            return getASGResources();
        }
        return Collections.emptyList();
    }

    @Override
    public List<Resource> resources(String... asgNames) {
        return getASGResources(asgNames);
    }

    /**
     * Crawls ASGs (optionally restricted to the given names) into AWSResources, recording
     * instances, ELBs, launch configuration info, and any ELB traffic suspension time as
     * additional fields.
     */
    private List<Resource> getASGResources(String... asgNames) {
        AWSClient awsClient = getAWSClient();
        // Refresh the launch-configuration cache before resolving ASGs against it.
        List<LaunchConfiguration> launchConfigurations = awsClient.describeLaunchConfigurations();
        for (LaunchConfiguration lc : launchConfigurations) {
            nameToLaunchConfig.put(lc.getLaunchConfigurationName(), lc);
        }
        List<Resource> resources = new LinkedList<Resource>();
        for (AutoScalingGroup asg : awsClient.describeAutoScalingGroups(asgNames)) {
            Resource asgResource = new AWSResource().withId(asg.getAutoScalingGroupName())
                    .withResourceType(AWSResourceType.ASG).withRegion(awsClient.region())
                    .withLaunchTime(asg.getCreatedTime());
            for (TagDescription tag : asg.getTags()) {
                asgResource.setTag(tag.getKey(), tag.getValue());
            }
            asgResource.setDescription(String.format("%d instances", asg.getInstances().size()));
            asgResource.setOwnerEmail(getOwnerEmailForResource(asgResource));
            if (asg.getStatus() != null) {
                ((AWSResource) asgResource).setAWSResourceState(asg.getStatus());
            }
            Integer maxSize = asg.getMaxSize();
            if (maxSize != null) {
                asgResource.setAdditionalField(ASG_FIELD_MAX_SIZE, String.valueOf(maxSize));
            }
            // Adds instances and ELBs as additional fields.
            List<String> instances = new ArrayList<String>();
            for (Instance instance : asg.getInstances()) {
                instances.add(instance.getInstanceId());
            }
            asgResource.setAdditionalField(ASG_FIELD_INSTANCES, StringUtils.join(instances, ","));
            asgResource.setAdditionalField(ASG_FIELD_ELBS,
                    StringUtils.join(asg.getLoadBalancerNames(), ","));
            // Launch configuration fields are only set when the named config was found in
            // the cache populated above.
            String lcName = asg.getLaunchConfigurationName();
            LaunchConfiguration lc = nameToLaunchConfig.get(lcName);
            if (lc != null) {
                asgResource.setAdditionalField(ASG_FIELD_LC_NAME, lcName);
            }
            if (lc != null && lc.getCreatedTime() != null) {
                asgResource.setAdditionalField(ASG_FIELD_LC_CREATION_TIME,
                        String.valueOf(lc.getCreatedTime().getTime()));
            }
            // sets the field for the time when the ASG's traffic is suspended from ELB;
            // only the first AddToLoadBalancer suspension with a parsable time is recorded.
            for (SuspendedProcess sp : asg.getSuspendedProcesses()) {
                if ("AddToLoadBalancer".equals(sp.getProcessName())) {
                    String suspensionTime = getSuspensionTimeString(sp.getSuspensionReason());
                    if (suspensionTime != null) {
                        LOGGER.info(String.format("Suspension time of ASG %s is %s",
                                asg.getAutoScalingGroupName(), suspensionTime));
                        asgResource.setAdditionalField(ASG_FIELD_SUSPENSION_TIME, suspensionTime);
                        break;
                    }
                }
            }
            resources.add(asgResource);
        }
        return resources;
    }

    /**
     * Extracts the suspension timestamp from an AWS suspension reason string.
     *
     * @param suspensionReason the raw reason text; may be null
     * @return the captured timestamp, or null when the reason does not match
     *         {@link #SUSPENSION_REASON_PATTERN}
     */
    private String getSuspensionTimeString(String suspensionReason) {
        if (suspensionReason == null) {
            return null;
        }
        Matcher matcher = SUSPENSION_REASON_PATTERN.matcher(suspensionReason);
        if (matcher.matches()) {
            return matcher.group(1);
        }
        return null;
    }
}
| 4,854 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/ELBJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.elasticloadbalancing.model.Instance;
import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription;
import com.amazonaws.services.elasticloadbalancing.model.Tag;
import com.amazonaws.services.elasticloadbalancing.model.TagDescription;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.aws.AWSClient;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
 * The crawler to crawl AWS ELBs for janitor monkey.
 */
public class ELBJanitorCrawler extends AbstractAWSJanitorCrawler {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ELBJanitorCrawler.class);
    /**
     * Instantiates a new ELB janitor crawler.
     *
     * @param awsClient
     *            the aws client
     */
    public ELBJanitorCrawler(AWSClient awsClient) {
        super(awsClient);
    }
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ELB);
    }
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ELB".equals(resourceType.name())) {
            return getELBResources();
        }
        return Collections.emptyList();
    }
    @Override
    public List<Resource> resources(String... resourceIds) {
        return getELBResources(resourceIds);
    }
    /**
     * Crawls ELB resources, annotating each with its instance membership, tags and
     * the auto scaling groups that reference it.
     *
     * @param elbNames optional load balancer names; when empty, all ELBs in the region are crawled
     * @return the crawled ELB resources
     */
    private List<Resource> getELBResources(String... elbNames) {
        List<Resource> resources = new LinkedList<Resource>();
        AWSClient awsClient = getAWSClient();
        for (LoadBalancerDescription elb : awsClient.describeElasticLoadBalancers(elbNames)) {
            Resource resource = new AWSResource().withId(elb.getLoadBalancerName())
                    .withRegion(getAWSClient().region()).withResourceType(AWSResourceType.ELB)
                    .withLaunchTime(elb.getCreatedTime());
            resource.setOwnerEmail(getOwnerEmailForResource(resource));
            resources.add(resource);
            List<Instance> instances = elb.getInstances();
            if (instances == null || instances.isEmpty()) {
                resource.setAdditionalField("instanceCount", "0");
                resource.setDescription("instances=none");
                LOGGER.debug(String.format("No instances found for ELB %s", resource.getId()));
            } else {
                resource.setAdditionalField("instanceCount", "" + instances.size());
                ArrayList<String> instanceList = new ArrayList<String>(instances.size());
                LOGGER.debug(String.format("Found %d instances for ELB %s", instances.size(), resource.getId()));
                for (Instance instance : instances) {
                    instanceList.add(instance.getInstanceId());
                }
                String instancesStr = StringUtils.join(instanceList, ",");
                // BUG FIX: the description previously formatted the raw Instance object list
                // ("instances") instead of the comma-separated instance ids ("instancesStr").
                resource.setDescription(String.format("instances=%s", instancesStr));
                LOGGER.debug(String.format("Resource ELB %s has instances %s", resource.getId(), instancesStr));
            }
            // Copy every ELB tag onto the resource.
            for (TagDescription tagDescription : awsClient.describeElasticLoadBalancerTags(resource.getId())) {
                for (Tag tag : tagDescription.getTags()) {
                    LOGGER.debug(String.format("Adding tag %s = %s to resource %s",
                            tag.getKey(), tag.getValue(), resource.getId()));
                    resource.setTag(tag.getKey(), tag.getValue());
                }
            }
        }
        // Annotate each ELB with the count and names of ASGs that reference it.
        Map<String, List<String>> elbtoASGMap = buildELBtoASGMap();
        for (Resource resource : resources) {
            List<String> asgList = elbtoASGMap.get(resource.getId());
            if (asgList != null && !asgList.isEmpty()) {
                resource.setAdditionalField("referencedASGCount", "" + asgList.size());
                String asgStr = StringUtils.join(asgList, ",");
                resource.setDescription(resource.getDescription() + ", ASGS=" + asgStr);
                LOGGER.debug(String.format("Resource ELB %s is referenced by ASGs %s", resource.getId(), asgStr));
            } else {
                resource.setAdditionalField("referencedASGCount", "0");
                resource.setDescription(resource.getDescription() + ", ASGS=none");
                LOGGER.debug(String.format("No ASGs found for ELB %s", resource.getId()));
            }
        }
        return resources;
    }
    /**
     * Builds a map from ELB name to the names of the auto scaling groups that
     * reference that load balancer.
     *
     * @return map of ELB name to referencing ASG names
     */
    private Map<String, List<String>> buildELBtoASGMap() {
        AWSClient awsClient = getAWSClient();
        LOGGER.info(String.format("Getting all ELBs associated with ASGs in region %s", awsClient.region()));
        List<AutoScalingGroup> autoScalingGroupList = awsClient.describeAutoScalingGroups();
        HashMap<String, List<String>> asgMap = new HashMap<>();
        for (AutoScalingGroup asg : autoScalingGroupList) {
            String asgName = asg.getAutoScalingGroupName();
            if (asg.getLoadBalancerNames() != null) {
                for (String elbName : asg.getLoadBalancerNames()) {
                    List<String> asgList = asgMap.get(elbName);
                    if (asgList == null) {
                        asgList = new ArrayList<>();
                        asgMap.put(elbName, asgList);
                    }
                    asgList.add(asgName);
                    LOGGER.debug(String.format("Found ASG %s associated with ELB %s", asgName, elbName));
                }
            }
        }
        return asgMap;
    }
}
| 4,855 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaEBSVolumeJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
/**
* The crawler to crawl AWS EBS volumes for Janitor monkey using Edda.
*/
public class EddaEBSVolumeJanitorCrawler implements JanitorCrawler {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaEBSVolumeJanitorCrawler.class);
    // NOTE(review): TIME_FORMATTER is not referenced anywhere in this class — confirm before removing.
    private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.S'Z'");
    // Number of volume ids per Edda request when fetching attachment history (see addLastAttachmentInfo).
    private static final int BATCH_SIZE = 50;
    // The value below specifies how many days we want to look back in Edda to find the owner of old instances.
    // In case of Edda keeps too much history data, without a reasonable date range, the query may fail.
    private static final int LOOKBACK_DAYS = 90;
    /**
     * The field name for purpose.
     */
    public static final String PURPOSE = "purpose";
    /**
     * The field name for deleteOnTermination.
     */
    public static final String DELETE_ON_TERMINATION = "deleteOnTermination";
    /**
     * The field name for detach time.
     */
    public static final String DETACH_TIME = "detachTime";
    // Client used for all Edda REST queries.
    private final EddaClient eddaClient;
    // Regions this crawler covers; populated once in the constructor.
    private final List<String> regions = Lists.newArrayList();
    // Maps instance id to the value of its owner tag; used to infer a volume's owner
    // from the instance it was last attached to.
    private final Map<String, String> instanceToOwner = Maps.newHashMap();
    /**
     * The constructor.
     * @param eddaClient
     *            the Edda client
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaEBSVolumeJanitorCrawler(EddaClient eddaClient, String... regions) {
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        for (String region : regions) {
            this.regions.add(region);
            // Eagerly build the instance-to-owner map so volume owners can be resolved later.
            updateInstanceToOwner(region);
        }
        LOGGER.info(String.format("Found owner for %d instances in %s", instanceToOwner.size(), this.regions));
    }
    /**
     * Populates {@link #instanceToOwner} with the owner tag of every running instance
     * Edda has seen in the region within the last {@link #LOOKBACK_DAYS} days.
     * @param region the region to query
     */
    private void updateInstanceToOwner(String region) {
        LOGGER.info(String.format("Getting owners for all instances in region %s", region));
        long startTime = DateTime.now().minusDays(LOOKBACK_DAYS).getMillis();
        String url = String.format("%1$s/view/instances;_since=%2$d;state.name=running;tags.key=%3$s;"
                + "_expand:(instanceId,tags:(key,value))",
                eddaClient.getBaseUrl(region), startTime, BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get Jason node from edda for instance owners in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            String instanceId = elem.get("instanceId").getTextValue();
            JsonNode tags = elem.get("tags");
            if (tags == null || !tags.isArray() || tags.size() == 0) {
                continue;
            }
            // Record only the owner tag; other tags are ignored.
            for (Iterator<JsonNode> tagsIt = tags.getElements(); tagsIt.hasNext();) {
                JsonNode tag = tagsIt.next();
                String tagKey = tag.get("key").getTextValue();
                if (BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY.equals(tagKey)) {
                    instanceToOwner.put(instanceId, tag.get("value").getTextValue());
                    break;
                }
            }
        }
    }
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_VOLUME);
    }
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("EBS_VOLUME".equals(resourceType.name())) {
            return getVolumeResources();
        }
        return Collections.emptyList();
    }
    @Override
    public List<Resource> resources(String... resourceIds) {
        return getVolumeResources(resourceIds);
    }
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        // Owner comes straight from the resource's owner tag; subclasses may customize this.
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }
    /**
     * Crawls unattached volumes in every configured region and enriches them with
     * last-attachment information.
     * @param volumeIds optional volume ids to restrict the crawl to
     * @return the crawled volume resources
     */
    private List<Resource> getVolumeResources(String... volumeIds) {
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getUnattachedVolumeResourcesInRegion(region, volumeIds));
            // NOTE(review): this call receives the full accumulated list, so resources from
            // earlier regions are re-processed on every later iteration — confirm if intended.
            addLastAttachmentInfo(resources);
        }
        return resources;
    }
    /**
     * Gets all volumes that are not attached to any instance. Janitor Monkey only considers unattached volumes
     * as cleanup candidates, so there is no need to get volumes that are in-use.
     * @param region the region to query
     * @param volumeIds optional volume ids to restrict the query to
     * @return list of resources that are not attached to any instance
     */
    private List<Resource> getUnattachedVolumeResourcesInRegion(String region, String... volumeIds) {
        String url = eddaClient.getBaseUrl(region) + "/aws/volumes;";
        if (volumeIds != null && volumeIds.length != 0) {
            url += StringUtils.join(volumeIds, ',');
            LOGGER.info(String.format("Getting volumes in region %s for %d ids", region, volumeIds.length));
        } else {
            LOGGER.info(String.format("Getting all unattached volumes in region %s", region));
        }
        // Only volumes in state "available" (i.e. unattached) are returned by this query.
        url += ";state=available;_expand:(volumeId,createTime,size,state,tags)";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get Jason node from edda for unattached volumes in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            resources.add(parseJsonElementToVolumeResource(region, it.next()));
        }
        return resources;
    }
    /**
     * Converts a single Edda volume document into an {@link AWSResource}.
     * @param region the region the volume belongs to
     * @param jsonNode the Edda JSON document for the volume
     * @return the parsed resource
     */
    private Resource parseJsonElementToVolumeResource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        long createTime = jsonNode.get("createTime").asLong();
        Resource resource = new AWSResource().withId(jsonNode.get("volumeId").getTextValue()).withRegion(region)
                .withResourceType(AWSResourceType.EBS_VOLUME)
                .withLaunchTime(new Date(createTime));
        JsonNode tags = jsonNode.get("tags");
        StringBuilder description = new StringBuilder();
        JsonNode size = jsonNode.get("size");
        description.append(String.format("size=%s", size == null ? "unknown" : size.getIntValue()));
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                description.append(String.format("; %s=%s", key, value));
                resource.setTag(key, value);
                // A "purpose" tag is surfaced as an additional field for the janitor rules.
                if (key.equals(PURPOSE)) {
                    resource.setAdditionalField(PURPOSE, value);
                }
            }
            // NOTE(review): the description is only set when tags exist; a tagless volume keeps a
            // null description even though the size text was built — confirm if intended.
            resource.setDescription(description.toString());
        }
        ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").getTextValue());
        return resource;
    }
    /**
     * Adds information of last attachment to the resources. To be compatible with the AWS implementation of
     * the same crawler, add the information to the JANITOR_META tag. It always uses the latest information
     * to update the tag in this resource (not writing back to AWS) no matter if the tag exists.
     * @param resources the volume resources
     */
    private void addLastAttachmentInfo(List<Resource> resources) {
        Validate.notNull(resources);
        LOGGER.info(String.format("Updating the latest attachment info for %d resources", resources.size()));
        // Group resources by region so each region's Edda endpoint is queried separately.
        Map<String, List<Resource>> regionToResources = Maps.newHashMap();
        for (Resource resource : resources) {
            List<Resource> regionalList = regionToResources.get(resource.getRegion());
            if (regionalList == null) {
                regionalList = Lists.newArrayList();
                regionToResources.put(resource.getRegion(), regionalList);
            }
            regionalList.add(resource);
        }
        for (Map.Entry<String, List<Resource>> entry : regionToResources.entrySet()) {
            // NOTE(review): this logs the overall count rather than entry.getValue().size().
            LOGGER.info(String.format("Updating the latest attachment info for %d resources in region %s",
                    resources.size(), entry.getKey()));
            for (List<Resource> batch : Lists.partition(entry.getValue(), BATCH_SIZE)) {
                LOGGER.info(String.format("Processing batch of size %d", batch.size()));
                String batchUrl = getBatchUrl(entry.getKey(), batch);
                JsonNode batchResult = null;
                try {
                    batchResult = eddaClient.getJsonNodeFromUrl(batchUrl);
                } catch (IOException e) {
                    LOGGER.error("Failed to get response for the batch.", e);
                }
                Map<String, Resource> idToResource = Maps.newHashMap();
                for (Resource resource : batch) {
                    idToResource.put(resource.getId(), resource);
                }
                if (batchResult == null || !batchResult.isArray()) {
                    throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                            batchUrl, batchResult));
                }
                // Track which volumes had attachment history; the rest fall back to createTime below.
                Set<String> processedIds = Sets.newHashSet();
                for (Iterator<JsonNode> it = batchResult.getElements(); it.hasNext();) {
                    JsonNode elem = it.next();
                    JsonNode data = elem.get("data");
                    String volumeId = data.get("volumeId").getTextValue();
                    Resource resource = idToResource.get(volumeId);
                    JsonNode attachments = data.get("attachments");
                    if (!(attachments.isArray() && attachments.size() > 0)) {
                        continue;
                    }
                    JsonNode attachment = attachments.get(0);
                    // "ltime" is when the in-use record was last current, i.e. the detach time.
                    JsonNode ltime = elem.get("ltime");
                    if (ltime == null || ltime.isNull()) {
                        continue;
                    }
                    DateTime detachTime = new DateTime(ltime.asLong());
                    processedIds.add(volumeId);
                    setAttachmentInfo(volumeId, attachment, detachTime, resource);
                }
                for (Map.Entry<String, Resource> volumeEntry : idToResource.entrySet()) {
                    String id = volumeEntry.getKey();
                    if (!processedIds.contains(id)) {
                        Resource resource = volumeEntry.getValue();
                        LOGGER.info(String.format("Volume %s never was attached, use createTime %s as the detachTime",
                                id, resource.getLaunchTime()));
                        setAttachmentInfo(id, null, new DateTime(resource.getLaunchTime().getTime()), resource);
                    }
                }
            }
        }
    }
    /**
     * Records the last-attachment details (owner, instance id, detach time) on the resource.
     * @param volumeId the volume id, used for logging
     * @param attachment the last attachment document, or null if the volume was never attached
     * @param detachTime the time the volume was last detached (or created, if never attached)
     * @param resource the resource to update
     */
    private void setAttachmentInfo(String volumeId, JsonNode attachment, DateTime detachTime, Resource resource) {
        String instanceId = null;
        if (attachment != null) {
            boolean deleteOnTermination = attachment.get(DELETE_ON_TERMINATION).getBooleanValue();
            if (deleteOnTermination) {
                LOGGER.info(String.format(
                        "Volume %s had set the deleteOnTermination flag as true", volumeId));
            }
            resource.setAdditionalField(DELETE_ON_TERMINATION, String.valueOf(deleteOnTermination));
            instanceId = attachment.get("instanceId").getTextValue();
        }
        // The subclass can customize the way to get the owner for a volume
        String owner = getOwnerEmailForResource(resource);
        if (owner == null && instanceId != null) {
            // Fall back to the owner of the instance the volume was last attached to.
            owner = instanceToOwner.get(instanceId);
        }
        resource.setOwnerEmail(owner);
        String metaTag = makeMetaTag(instanceId, owner, detachTime);
        LOGGER.info(String.format("Setting Janitor Metatag as %s for volume %s", metaTag, volumeId));
        resource.setTag(JanitorMonkey.JANITOR_META_TAG, metaTag);
        LOGGER.info(String.format("The last detach time of volume %s is %s", volumeId, detachTime));
        resource.setAdditionalField(DETACH_TIME, String.valueOf(detachTime.getMillis()));
    }
    /**
     * Builds the semicolon-delimited janitor meta tag value; null parts become empty strings.
     * @param instance the instance id the volume was last attached to, may be null
     * @param owner the owner email, may be null
     * @param lastDetachTime the last detach time, may be null
     * @return the meta tag value
     */
    private String makeMetaTag(String instance, String owner, DateTime lastDetachTime) {
        StringBuilder meta = new StringBuilder();
        meta.append(String.format("%s=%s;",
                JanitorMonkey.INSTANCE_TAG_KEY, instance == null ? "" : instance));
        meta.append(String.format("%s=%s;", BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY, owner == null ? "" : owner));
        meta.append(String.format("%s=%s", JanitorMonkey.DETACH_TIME_TAG_KEY,
                lastDetachTime == null ? "" : AWSResource.DATE_FORMATTER.print(lastDetachTime)));
        return meta.toString();
    }
    /**
     * Builds the Edda history URL for a batch of volumes, requesting each volume's
     * last in-use record with its attachments expanded.
     * @param region the region to query
     * @param batch the volumes in the batch
     * @return the Edda query URL
     */
    private String getBatchUrl(String region, List<Resource> batch) {
        StringBuilder batchUrl = new StringBuilder(eddaClient.getBaseUrl(region) + "/aws/volumes/");
        boolean isFirst = true;
        for (Resource resource : batch) {
            if (!isFirst) {
                batchUrl.append(',');
            } else {
                isFirst = false;
            }
            batchUrl.append(resource.getId());
        }
        batchUrl.append(";data.state=in-use;_since=0;_expand;_meta:"
                + "(ltime,data:(volumeId,attachments:(deleteOnTermination,instanceId)))");
        return batchUrl.toString();
    }
}
| 4,856 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaInstanceJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.aws.janitor.crawler.InstanceJanitorCrawler;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.HashSet;
import java.util.HashMap;
/**
* The crawler to crawl AWS instances for janitor monkey using Edda.
*/
public class EddaInstanceJanitorCrawler implements JanitorCrawler {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaInstanceJanitorCrawler.class);
    // NOTE(review): TIME_FORMATTER is not referenced anywhere in this class — confirm before removing.
    private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.S'Z'");
    // Client used for all Edda REST queries.
    private final EddaClient eddaClient;
    // Regions this crawler covers; populated once in the constructor.
    private final List<String> regions = Lists.newArrayList();
    // Maps instance id to the name of the ASG it belongs to; rebuilt by refreshAsgInstances().
    private final Map<String, String> instanceToAsg = Maps.newHashMap();
    /** Max image ids per Edda Query */
    private static final int MAX_IMAGE_IDS_PER_QUERY = 40;
    /**
     * Instantiates a new basic instance crawler.
     * @param eddaClient
     *            the Edda client
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaInstanceJanitorCrawler(EddaClient eddaClient, String... regions) {
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        for (String region : regions) {
            this.regions.add(region);
        }
    }
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.INSTANCE);
    }
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("INSTANCE".equals(resourceType.name())) {
            return getInstanceResources();
        }
        return Collections.emptyList();
    }
    @Override
    public List<Resource> resources(String... resourceIds) {
        return getInstanceResources(resourceIds);
    }
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        // Owner comes straight from the resource's owner tag.
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }
    /**
     * Crawls instance resources in every configured region.
     * @param instanceIds optional instance ids to restrict the crawl to
     * @return the crawled instance resources
     */
    private List<Resource> getInstanceResources(String... instanceIds) {
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getInstanceResourcesInRegion(region, instanceIds));
        }
        return resources;
    }
    /**
     * Crawls running instances in one region via Edda and converts them to resources.
     * @param region the region to crawl
     * @param instanceIds optional instance ids to restrict the query to
     * @return the crawled resources
     */
    private List<Resource> getInstanceResourcesInRegion(String region, String... instanceIds) {
        // NOTE(review): refreshAsgInstances() itself iterates all regions, so the full ASG
        // refresh is re-run for every region crawled here — confirm if intended.
        refreshAsgInstances();
        String url = eddaClient.getBaseUrl(region) + "/view/instances;";
        if (instanceIds != null && instanceIds.length != 0) {
            url += StringUtils.join(instanceIds, ',');
            LOGGER.info(String.format("Getting instances in region %s for %d ids", region, instanceIds.length));
        } else {
            LOGGER.info(String.format("Getting all instances in region %s", region));
        }
        // Only running instances, expanded with just the fields needed to build the resource.
        url += ";state.name=running;_expand:(instanceId,launchTime,state:(name),instanceType,imageId"
                + ",publicDnsName,tags:(key,value))";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get Jason node from edda for instances in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            resources.add(parseJsonElementToInstanceResource(region, it.next()));
        }
        // Fill in missing owners from the owner tag of each instance's AMI.
        refreshOwnerByImage(region, resources);
        return resources;
    }
    /**
     * Converts a single Edda instance document into an {@link AWSResource}.
     * @param region the region the instance belongs to
     * @param jsonNode the Edda JSON document for the instance
     * @return the parsed resource
     */
    private Resource parseJsonElementToInstanceResource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        String instanceId = jsonNode.get("instanceId").getTextValue();
        long launchTime = jsonNode.get("launchTime").getLongValue();
        Resource resource = new AWSResource().withId(instanceId).withRegion(region)
                .withResourceType(AWSResourceType.INSTANCE)
                .withLaunchTime(new Date(launchTime));
        JsonNode publicDnsName = jsonNode.get("publicDnsName");
        String description = String.format("type=%s; host=%s",
                jsonNode.get("instanceType").getTextValue(),
                publicDnsName == null ? "" : publicDnsName.getTextValue());
        resource.setDescription(description);
        // NOTE(review): no tags have been set on the resource at this point, so this owner
        // lookup is expected to return null; the owner is actually filled in by the tags
        // loop below or by refreshOwnerByImage() — confirm before relying on it.
        String owner = getOwnerEmailForResource(resource);
        resource.setOwnerEmail(owner);
        JsonNode tags = jsonNode.get("tags");
        String asgName = null;
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
                if ("aws:autoscaling:groupName".equals(key)) {
                    asgName = value;
                } else if (owner == null && BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY.equals(key)) {
                    resource.setOwnerEmail(value);
                }
            }
            resource.setDescription(description.toString());
        }
        // If we cannot find ASG name in tags, use the map for the ASG name
        if (asgName == null) {
            asgName = instanceToAsg.get(instanceId);
            if (asgName != null) {
                LOGGER.debug(String.format("Failed to find ASG name in tags of %s, use the ASG name %s from map",
                        instanceId, asgName));
            }
        }
        if (asgName != null) {
            resource.setAdditionalField(InstanceJanitorCrawler.INSTANCE_FIELD_ASG_NAME, asgName);
        }
        ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").get("name").getTextValue());
        String imageId = jsonNode.get("imageId").getTextValue();
        resource.setAdditionalField("imageId", imageId);
        return resource;
    }
    /**
     * Rebuilds {@link #instanceToAsg} by querying each region's auto scaling groups
     * and recording which ASG every member instance belongs to.
     */
    private void refreshAsgInstances() {
        instanceToAsg.clear();
        for (String region : regions) {
            LOGGER.info(String.format("Getting ASG instances in region %s", region));
            String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups"
                    + ";_expand:(autoScalingGroupName,instances:(instanceId))";
            JsonNode jsonNode = null;
            try {
                jsonNode = eddaClient.getJsonNodeFromUrl(url);
            } catch (Exception e) {
                LOGGER.error(String.format(
                        "Failed to get Jason node from edda for ASGs in region %s.", region), e);
            }
            if (jsonNode == null || !jsonNode.isArray()) {
                throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                        url, jsonNode));
            }
            for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
                JsonNode asg = it.next();
                String asgName = asg.get("autoScalingGroupName").getTextValue();
                JsonNode instances = asg.get("instances");
                if (instances == null || instances.isNull() || !instances.isArray() || instances.size() == 0) {
                    continue;
                }
                for (Iterator<JsonNode> instanceIt = instances.getElements(); instanceIt.hasNext();) {
                    JsonNode instance = instanceIt.next();
                    instanceToAsg.put(instance.get("instanceId").getTextValue(), asgName);
                }
            }
        }
    }
    /**
     * For resources that still have no owner, looks up the owner tag of the AMI each
     * instance was launched from (in batched Edda queries) and applies it.
     * @param region the region whose AMIs are queried
     * @param resources the instance resources to update
     */
    private void refreshOwnerByImage(String region, List<Resource> resources) {
        HashSet<String> imageIds = new HashSet<>();
        for (Resource resource: resources) {
            if (resource.getOwnerEmail() == null) {
                imageIds.add(resource.getAdditionalField("imageId"));
            }
        }
        if (imageIds.size() > 0) {
            HashMap<String, String> imageToOwner = new HashMap<>();
            String baseurl = eddaClient.getBaseUrl(region) + "/aws/images/";
            Iterator<String> itr = imageIds.iterator();
            long leftToQuery = imageIds.size();
            // Query at most MAX_IMAGE_IDS_PER_QUERY image ids per request to keep URLs short.
            while (leftToQuery > 0) {
                long batchcount = leftToQuery > MAX_IMAGE_IDS_PER_QUERY ? MAX_IMAGE_IDS_PER_QUERY : leftToQuery;
                leftToQuery -= batchcount;
                ArrayList<String> batch = new ArrayList<>();
                for(int i=0;i<batchcount; i++) {
                    batch.add(itr.next());
                }
                String url = baseurl;
                url += StringUtils.join(batch, ',');
                url += ";tags.key=owner;public=false;_expand:(imageId,tags:(owner))";
                JsonNode imageJsonNode = null;
                try {
                    imageJsonNode = eddaClient.getJsonNodeFromUrl(url);
                } catch (Exception e) {
                    LOGGER.error(String.format(
                            "Failed to get Json node from edda for AMIs in region %s.", region), e);
                }
                // A failed batch is skipped; owners from other batches are still applied.
                if (imageJsonNode != null) {
                    for (Iterator<JsonNode> it = imageJsonNode.getElements(); it.hasNext();) {
                        JsonNode image = it.next();
                        String imageId = image.get("imageId").getTextValue();
                        JsonNode tags = image.get("tags");
                        for (Iterator<JsonNode> tagIt = tags.getElements(); tagIt.hasNext();) {
                            JsonNode tag = tagIt.next();
                            if (tag.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY) != null) {
                                imageToOwner.put(imageId, tag.get(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY).getTextValue());
                                break;
                            }
                        }
                    }
                }
            }
            if (imageToOwner.size() > 0) {
                for (Resource resource: resources) {
                    if (resource.getOwnerEmail() == null
                            && imageToOwner.get(resource.getAdditionalField("imageId")) != null) {
                        resource.setOwnerEmail(imageToOwner.get(resource.getAdditionalField("imageId")));
                        LOGGER.info(String.format("Found owner %s for instance %s in AMI %s",
                                resource.getOwnerEmail(), resource.getId(), resource.getAdditionalField("imageId")));
                    }
                }
            }
        }
    }
}
| 4,857 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaUtils.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.netflix.simianarmy.client.edda.EddaClient;
import org.codehaus.jackson.JsonNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
/**
 * Miscellaneous common Edda utilities.
 */
public class EddaUtils {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaUtils.class);
    /**
     * Gets the owner email of every application registered in Edda.
     *
     * @param eddaClient the Edda client to query
     * @return map from lower-cased application name to owner email; empty when the
     *         Edda endpoint is unreachable
     */
    public static Map<String, String> getAllApplicationOwnerEmails(EddaClient eddaClient) {
        String region = "us-east-1";
        LOGGER.info(String.format("Getting all application names and emails in region %s.", region));
        String url = eddaClient.getBaseUrl(region) + "/netflix/applications/;_expand:(name,email)";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (UnknownHostException e) {
            // Best effort: no Edda in this region means no owner data, not a failure.
            LOGGER.warn(String.format("Edda endpoint is not available in region %s", region));
            return Collections.emptyMap();
        } catch (Exception e) {
            throw new RuntimeException(String.format("Failed to get Json node from url: %s", url), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        Iterator<JsonNode> it = jsonNode.getElements();
        Map<String, String> appToOwner = new HashMap<String, String>();
        while (it.hasNext()) {
            JsonNode node = it.next();
            JsonNode nameNode = node.get("name");
            JsonNode emailNode = node.get("email");
            if (nameNode == null || emailNode == null) {
                continue;
            }
            // BUG FIX: toLowerCase() was previously called before the null check, so a null
            // application name threw a NullPointerException instead of being skipped.
            String appName = nameNode.getTextValue();
            String owner = emailNode.getTextValue();
            if (appName != null && owner != null) {
                appToOwner.put(appName.toLowerCase(), owner);
            }
        }
        return appToOwner;
    }
}
| 4,858 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaEBSSnapshotJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
* The crawler to crawl AWS EBS snapshots for janitor monkey using Edda.
*/
public class EddaEBSSnapshotJanitorCrawler implements JanitorCrawler {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaEBSSnapshotJanitorCrawler.class);

    /** The name representing the additional field name of AMIs generated using the snapshot. */
    public static final String SNAPSHOT_FIELD_AMIS = "AMIs";

    /** The map from snapshot id to the AMI ids that are generated using the snapshot. */
    private final Map<String, Collection<String>> snapshotToAMIs = Maps.newHashMap();

    /** Client used to talk to the Edda endpoints. */
    private final EddaClient eddaClient;

    /** The regions the crawler operates on. */
    private final List<String> regions = Lists.newArrayList();

    /** When non-null, snapshots with a different AWS owner id are skipped. */
    private final String defaultOwnerId;

    /**
     * The constructor.
     * @param defaultOwnerId
     *            the default owner id that snapshots need to have for being crawled, null means no filtering is
     *            needed
     * @param eddaClient
     *            the Edda client
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaEBSSnapshotJanitorCrawler(String defaultOwnerId, EddaClient eddaClient, String... regions) {
        this.defaultOwnerId = defaultOwnerId;
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        Collections.addAll(this.regions, regions);
    }

    /** This crawler only handles the EBS_SNAPSHOT resource type. */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.EBS_SNAPSHOT);
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("EBS_SNAPSHOT".equals(resourceType.name())) {
            return getSnapshotResources();
        }
        return Collections.emptyList();
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(String... resourceIds) {
        return getSnapshotResources(resourceIds);
    }

    /**
     * Crawls snapshot resources in every configured region.
     *
     * @param snapshotIds optional explicit snapshot ids; empty means all snapshots
     * @return the crawled snapshot resources
     */
    private List<Resource> getSnapshotResources(String... snapshotIds) {
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getSnapshotResourcesInRegion(region, snapshotIds));
        }
        return resources;
    }

    /**
     * Crawls the snapshots of a single region from Edda, skipping snapshots that
     * do not belong to the configured default owner id (when one is set).
     */
    private List<Resource> getSnapshotResourcesInRegion(String region, String... snapshotIds) {
        // Refresh the snapshot-to-AMI mapping first so parsed resources can record
        // which AMIs were generated from each snapshot.
        refreshSnapshotToAMIs(region);

        String url = eddaClient.getBaseUrl(region) + "/aws/snapshots/";
        if (snapshotIds != null && snapshotIds.length != 0) {
            url += StringUtils.join(snapshotIds, ',');
            LOGGER.info(String.format("Getting snapshots in region %s for %d ids", region, snapshotIds.length));
        } else {
            LOGGER.info(String.format("Getting all snapshots in region %s", region));
        }
        // Only completed snapshots are considered; expand just the fields needed.
        url += ";state=completed;_expand:(snapshotId,state,description,startTime,tags,ownerId)";

        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // Typo fix: the original message read "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for snapshots in region %s.", region), e);
        }

        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }

        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            // Filter out shared snapshots that do not have the specified owner id.
            String ownerId = elem.get("ownerId").getTextValue();
            if (defaultOwnerId != null && !defaultOwnerId.equals(ownerId)) {
                LOGGER.info(String.format("Ignoring snapshotIds %s since it does not have the specified ownerId.",
                        elem.get("snapshotId").getTextValue()));
            } else {
                resources.add(parseJsonElementToSnapshotResource(region, elem));
            }
        }
        return resources;
    }

    /**
     * Converts one Edda snapshot JSON element into an AWSResource, copying tags,
     * description, state, any generated-AMI ids, and the owner email.
     */
    private Resource parseJsonElementToSnapshotResource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        long startTime = jsonNode.get("startTime").asLong();

        Resource resource = new AWSResource().withId(jsonNode.get("snapshotId").getTextValue()).withRegion(region)
                .withResourceType(AWSResourceType.EBS_SNAPSHOT)
                .withLaunchTime(new Date(startTime));

        JsonNode tags = jsonNode.get("tags");
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
            }
        }

        JsonNode description = jsonNode.get("description");
        if (description != null) {
            resource.setDescription(description.getTextValue());
        }
        ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").getTextValue());
        Collection<String> amis = snapshotToAMIs.get(resource.getId());
        if (amis != null) {
            resource.setAdditionalField(SNAPSHOT_FIELD_AMIS, StringUtils.join(amis, ","));
        }
        resource.setOwnerEmail(getOwnerEmailForResource(resource));
        return resource;
    }

    /** Looks up the owner email from the resource's global owner tag. */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }

    /**
     * Gets the collection of AMIs that are created using a specific snapshot.
     * @param snapshotId the snapshot id
     */
    protected Collection<String> getAMIsForSnapshot(String snapshotId) {
        Collection<String> amis = snapshotToAMIs.get(snapshotId);
        if (amis != null) {
            return Collections.unmodifiableCollection(amis);
        } else {
            return Collections.emptyList();
        }
    }

    /**
     * Rebuilds the mapping from snapshot id to the AMIs generated from it, based
     * on the block device mappings of every image known to Edda in the region.
     */
    private void refreshSnapshotToAMIs(String region) {
        snapshotToAMIs.clear();
        LOGGER.info(String.format("Getting mapping from snapshot to AMIs in region %s", region));
        String url = eddaClient.getBaseUrl(region) + "/aws/images/"
                + ";_expand:(imageId,blockDeviceMappings:(ebs:(snapshotId)))";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // Typo fix: the original message read "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for AMI mapping in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            String imageId = elem.get("imageId").getTextValue();
            JsonNode blockMappings = elem.get("blockDeviceMappings");
            if (blockMappings == null || !blockMappings.isArray() || blockMappings.size() == 0) {
                continue;
            }
            for (Iterator<JsonNode> blockMappingsIt = blockMappings.getElements(); blockMappingsIt.hasNext();) {
                JsonNode blockMappingNode = blockMappingsIt.next();
                JsonNode ebs = blockMappingNode.get("ebs");
                if (ebs == null) {
                    continue;
                }
                JsonNode snapshotIdNode = ebs.get("snapshotId");
                // Bug fix: an ebs element may lack a snapshotId; the original code
                // called getTextValue() on a null node and threw a NullPointerException.
                if (snapshotIdNode == null || snapshotIdNode.isNull()) {
                    continue;
                }
                String snapshotId = snapshotIdNode.getTextValue();
                LOGGER.debug(String.format("Snapshot %s is used to generate AMI %s", snapshotId, imageId));
                Collection<String> amis = snapshotToAMIs.get(snapshotId);
                if (amis == null) {
                    amis = Lists.newArrayList();
                    snapshotToAMIs.put(snapshotId, amis);
                }
                amis.add(imageId);
            }
        }
    }
}
| 4,859 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaLaunchConfigJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/**
* The crawler to crawl AWS launch configurations for janitor monkey using Edda.
*/
public class EddaLaunchConfigJanitorCrawler implements JanitorCrawler {

    /** The name representing the additional field name of a flag indicating if the launch config
     * is used by an auto scaling group. */
    public static final String LAUNCH_CONFIG_FIELD_USED_BY_ASG = "USED_BY_ASG";

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaLaunchConfigJanitorCrawler.class);

    /** Client used to talk to the Edda endpoints. */
    private final EddaClient eddaClient;

    /** The regions the crawler operates on. */
    private final List<String> regions = Lists.newArrayList();

    /**
     * Instantiates a new basic launch configuration crawler.
     * @param eddaClient
     *            the Edda client
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaLaunchConfigJanitorCrawler(EddaClient eddaClient, String... regions) {
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        Validate.notNull(regions);
        Collections.addAll(this.regions, regions);
    }

    /** This crawler only handles the LAUNCH_CONFIG resource type. */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.LAUNCH_CONFIG);
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("LAUNCH_CONFIG".equals(resourceType.name())) {
            return getLaunchConfigResources();
        }
        return Collections.emptyList();
    }

    /** {@inheritDoc} */
    @Override
    public List<Resource> resources(String... resourceIds) {
        return getLaunchConfigResources(resourceIds);
    }

    /**
     * Crawls launch configuration resources in every configured region.
     *
     * @param launchConfigNames optional explicit names; empty means all
     * @return the crawled launch configuration resources
     */
    private List<Resource> getLaunchConfigResources(String... launchConfigNames) {
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getLaunchConfigResourcesInRegion(region, launchConfigNames));
        }
        return resources;
    }

    /** Launch configurations cannot carry tags, so no owner email can be determined. */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        //Launch Configs don't have Tags
        return null;
    }

    /**
     * Crawls the launch configurations of a single region from Edda and flags each
     * with whether any ASG currently uses it.
     */
    private List<Resource> getLaunchConfigResourcesInRegion(String region, String... launchConfigNames) {
        // NOTE(review): the other Edda crawlers separate the id list with '/',
        // this one uses ';'. Kept byte-identical since changing the URL shape
        // could change Edda's matching behavior - confirm against the Edda REST API.
        String url = eddaClient.getBaseUrl(region) + "/aws/launchConfigurations;";
        if (launchConfigNames != null && launchConfigNames.length != 0) {
            url += StringUtils.join(launchConfigNames, ',');
            LOGGER.info(String.format("Getting launch configurations in region %s for %d ids",
                    region, launchConfigNames.length));
        } else {
            LOGGER.info(String.format("Getting all launch configurations in region %s", region));
        }
        url += ";_expand:(launchConfigurationName,createdTime)";

        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // Typo fix: the original message read "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for instances in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }

        List<Resource> resources = Lists.newArrayList();
        Set<String> usedLCs = getLaunchConfigsInUse(region);
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode launchConfiguration = it.next();
            String lcName = launchConfiguration.get("launchConfigurationName").getTextValue();
            Resource lcResource = new AWSResource().withId(lcName)
                    .withRegion(region).withResourceType(AWSResourceType.LAUNCH_CONFIG)
                    .withLaunchTime(new Date(launchConfiguration.get("createdTime").getLongValue()));
            lcResource.setOwnerEmail(getOwnerEmailForResource(lcResource));
            lcResource.setAdditionalField(LAUNCH_CONFIG_FIELD_USED_BY_ASG, String.valueOf(usedLCs.contains(lcName)));
            resources.add(lcResource);
        }
        return resources;
    }

    /**
     * Gets the launch configs that are currently in use by at least one ASG in a region.
     * @param region the region
     * @return the set of launch config names
     */
    private Set<String> getLaunchConfigsInUse(String region) {
        LOGGER.info(String.format("Getting all launch configurations in use in region %s", region));
        String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups;_expand:(launchConfigurationName)";
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            // Typo fix: the original message read "Jason node".
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for launch configs in use in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        Set<String> launchConfigs = Sets.newHashSet();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            launchConfigs.add(it.next().get("launchConfigurationName").getTextValue());
        }
        return launchConfigs;
    }
}
| 4,860 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/crawler/edda/EddaImageJanitorCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* The crawler to crawl AWS AMIs for janitor monkey using Edda. Only images that are not currently referenced
* by any existing instances or launch configurations are returned.
*/
public class EddaImageJanitorCrawler implements JanitorCrawler {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(EddaImageJanitorCrawler.class);
    /** The name representing the additional field name for the last reference time by instance. */
    public static final String AMI_FIELD_LAST_INSTANCE_REF_TIME = "Last_Instance_Reference_Time";
    /** The name representing the additional field name for the last reference time by launch config. */
    public static final String AMI_FIELD_LAST_LC_REF_TIME = "Last_Launch_Config_Reference_Time";
    /** The name representing the additional field name for whether the image is a base image. **/
    public static final String AMI_FIELD_BASE_IMAGE = "Base_Image";
    /** Number of images per batched Edda history query. */
    private static final int BATCH_SIZE = 100;
    /** Client used to talk to the Edda endpoints. */
    private final EddaClient eddaClient;
    /** The regions the crawler operates on. */
    private final List<String> regions = Lists.newArrayList();
    /** Image ids referenced by at least one existing instance. */
    private final Set<String> usedByInstance = Sets.newHashSet();
    /** Image ids referenced by at least one launch configuration. */
    private final Set<String> usedByLaunchConfig = Sets.newHashSet();
    /** Image names considered in use (referenced or excluded) across regions. */
    private final Set<String> usedNames = Sets.newHashSet();
    /** Mapping from image id to image name, refreshed at the start of each crawl. */
    protected final Map<String, String> imageIdToName = Maps.newHashMap();
    /** Mapping from image id to approximate creation time (the Edda record's ctime). */
    private final Map<String, Long> imageIdToCreationTime = Maps.newHashMap();
    /** Ids of images found to be the base/ancestor of other images. */
    private final Set<String> ancestorImageIds = Sets.newHashSet();
    /** Owner id used to filter the Edda image queries; blank disables the filter. */
    private String ownerId;
    /** Number of days of Edda history checked for last-reference times. */
    private final int daysBack;
    /** Regex fragment matching an 8-hex-char AMI id. */
    private static final String IMAGE_ID = "ami-[a-z0-9]{8}";
    /** Pattern extracting "base_ami_id=<ami>" from an image description. */
    private static final Pattern BASE_AMI_ID_PATTERN = Pattern.compile("^.*?base_ami_id=(" + IMAGE_ID + ").*?");
    /** Pattern extracting "ancestor_id=<ami>" from an image description. */
    private static final Pattern ANCESTOR_ID_PATTERN = Pattern.compile("^.*?ancestor_id=(" + IMAGE_ID + ").*?$");
    /**
     * Instantiates a new basic AMI crawler.
     * @param eddaClient
     *            the Edda client
     * @param ownerId
     *            the owner id used to filter the crawled images; blank means no owner filtering
     * @param daysBack
     *            the number of days that the crawler checks back in history stored in Edda
     * @param regions
     *            the regions the crawler will crawl resources for
     */
    public EddaImageJanitorCrawler(EddaClient eddaClient, String ownerId, int daysBack, String... regions) {
        Validate.notNull(eddaClient);
        this.eddaClient = eddaClient;
        this.ownerId = ownerId;
        Validate.isTrue(daysBack >= 0);
        this.daysBack = daysBack;
        Validate.notNull(regions);
        for (String region : regions) {
            this.regions.add(region);
        }
    }
@Override
public EnumSet<? extends ResourceType> resourceTypes() {
return EnumSet.of(AWSResourceType.IMAGE);
}
@Override
public List<Resource> resources(ResourceType resourceType) {
if ("IMAGE".equals(resourceType.name())) {
return getAMIResources();
}
return Collections.emptyList();
}
@Override
public List<Resource> resources(String... imageIds) {
return getAMIResources(imageIds);
}
@Override
public String getOwnerEmailForResource(Resource resource) {
Validate.notNull(resource);
return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
}
private List<Resource> getAMIResources(String... imageIds) {
refreshIdToNameMap();
refreshAMIsUsedByInstance();
refreshAMIsUsedByLC();
refreshIdToCreationTime();
for (String excludedId : getExcludedImageIds()) {
String name = imageIdToName.get(excludedId);
usedNames.add(name);
}
LOGGER.info(String.format("%d image names are used across the %d regions.",
usedNames.size(), regions.size()));
Collection<String> excludedImageIds = getExcludedImageIds();
List<Resource> resources = Lists.newArrayList();
for (String region : regions) {
try {
resources.addAll(getAMIResourcesInRegion(region, excludedImageIds, imageIds));
} catch (Exception e) {
LOGGER.error("AMI look up failed for {} in {}", imageIds, region, e);
}
}
return resources;
}
/**
* The method allows users to put their own logic to exclude a set of images from being
* cleaned up by Janitor Monkey. In some cases, images are not used but still need to be
* kept longer.
* @return a collection of image ids that need to be excluded from Janitor Monkey
*/
protected Collection<String> getExcludedImageIds() {
return Sets.newHashSet();
}
private JsonNode getImagesInJson(String region, String... imageIds) {
String url = eddaClient.getBaseUrl(region) + "/aws/images";
if (imageIds != null && imageIds.length != 0) {
url += "/" + StringUtils.join(imageIds, ',');
if (imageIds.length == 1) {
url +=","; // Edda will return a non-array if passing exactly one imageId which will fail the crawler
}
LOGGER.info(String.format("Getting unreferenced AMIs in region %s for %d ids", region, imageIds.length));
} else {
LOGGER.info(String.format("Getting all unreferenced AMIs in region %s", region));
if (StringUtils.isNotBlank(ownerId)) {
url += ";ownerId=" + ownerId;
}
}
url += ";_expand:(imageId,name,description,state,tags:(key,value))";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for AMIs in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
}
return jsonNode;
}
private void refreshIdToNameMap() {
imageIdToName.clear();
for (String region : regions) {
JsonNode jsonNode = getImagesInJson(region);
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode ami = it.next();
String imageId = ami.get("imageId").getTextValue();
String name = ami.get("name").getTextValue();
imageIdToName.put(imageId, name);
}
}
LOGGER.info(String.format("Got mapping from image id to name for %d ids", imageIdToName.size()));
}
/**
* AWS doesn't provide creation time for images. We use the ctime (the creation time of the image record in Edda)
* to approximate the creation time of the image.
*/
private void refreshIdToCreationTime() {
for (String region : regions) {
String url = eddaClient.getBaseUrl(region) + "/aws/images";
LOGGER.info(String.format("Getting the creation time for all AMIs in region %s", region));
if (StringUtils.isNotBlank(ownerId)) {
url += ";data.ownerId=" + ownerId;
}
url += ";_expand;_meta:(ctime,data:(imageId))";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for creation time of AMIs in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
url, jsonNode));
}
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode elem = it.next();
JsonNode data = elem.get("data");
String imageId = data.get("imageId").getTextValue();
JsonNode ctimeNode = elem.get("ctime");
if (ctimeNode != null && !ctimeNode.isNull()) {
long ctime = ctimeNode.asLong();
LOGGER.debug(String.format("The image record of %s was created in Edda at %s",
imageId, new DateTime(ctime)));
imageIdToCreationTime.put(imageId, ctime);
}
}
}
LOGGER.info(String.format("Got creation time for %d images", imageIdToCreationTime.size()));
}
    /**
     * Crawls the AMI resources of a single region, dropping images that are
     * excluded, referenced by an instance or launch config, or whose name is
     * already in use in another region. Last-reference times and base-image
     * flags are then attached to the surviving resources.
     *
     * @param region the region to crawl
     * @param excludedImageIds ids excluded from Janitor Monkey management
     * @param imageIds optional explicit ids to crawl; empty means all
     * @return the unreferenced AMI resources in the region
     */
    private List<Resource> getAMIResourcesInRegion(String region,
                                                   Collection<String> excludedImageIds,
                                                   String... imageIds) {
        JsonNode jsonNode = getImagesInJson(region, imageIds);
        List<Resource> resources = Lists.newArrayList();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode ami = it.next();
            String imageId = ami.get("imageId").getTextValue();
            // Parsing happens before the exclusion check because it also records
            // ancestor image ids as a side effect (see parseJsonElementToresource).
            Resource resource = parseJsonElementToresource(region, ami);
            String name = ami.get("name").getTextValue();
            if (excludedImageIds.contains(imageId)) {
                LOGGER.info(String.format("Image %s is excluded from being managed by Janitor Monkey, ignore.",
                        imageId));
                continue;
            }
            if (usedByInstance.contains(imageId) || usedByLaunchConfig.contains(imageId)) {
                LOGGER.info(String.format("AMI %s is referenced by existing instance or launch configuration.",
                        imageId));
            } else {
                LOGGER.info(String.format("AMI %s is not referenced by existing instance or launch configuration.",
                        imageId));
                if (usedNames.contains(name)) {
                    LOGGER.info(String.format("The same AMI name %s is used in another region", name));
                } else {
                    resources.add(resource);
                }
            }
        }
        // Only look back a bounded window (daysBack) in Edda history.
        long since = DateTime.now().minusDays(daysBack).getMillis();
        addLastReferenceInfo(resources, since);
        // Mark the base AMIs that are used as the ancestor of other images
        for (Resource resource : resources) {
            if (ancestorImageIds.contains(resource.getId())) {
                resource.setAdditionalField(AMI_FIELD_BASE_IMAGE, "true");
            }
        }
        return resources;
    }
    /**
     * Converts one Edda image JSON element into an AWSResource, copying tags,
     * description, state, and owner email. As a side effect, any base AMI id
     * found in the description is recorded in {@code ancestorImageIds} so that
     * ancestors can later be flagged with {@code AMI_FIELD_BASE_IMAGE}.
     */
    private Resource parseJsonElementToresource(String region, JsonNode jsonNode) {
        Validate.notNull(jsonNode);
        String imageId = jsonNode.get("imageId").getTextValue();
        Resource resource = new AWSResource().withId(imageId).withRegion(region)
                .withResourceType(AWSResourceType.IMAGE);
        // Launch time is approximated by the Edda record's ctime; it may be
        // absent for images with no recorded creation time.
        Long creationTime = imageIdToCreationTime.get(imageId);
        if (creationTime != null) {
            resource.setLaunchTime(new Date(creationTime));
        }
        JsonNode tags = jsonNode.get("tags");
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
            }
        }
        JsonNode descNode = jsonNode.get("description");
        if (descNode != null && !descNode.isNull()) {
            String description = descNode.getTextValue();
            resource.setDescription(description);
            String ancestorImageId = getBaseAmiIdFromDescription(description);
            if (ancestorImageId != null && !ancestorImageIds.contains(ancestorImageId)) {
                LOGGER.info(String.format("Found base AMI id %s from description '%s'", ancestorImageId, description));
                ancestorImageIds.add(ancestorImageId);
            }
        }
        ((AWSResource) resource).setAWSResourceState(jsonNode.get("state").getTextValue());
        String owner = getOwnerEmailForResource(resource);
        if (owner != null) {
            resource.setOwnerEmail(owner);
        }
        return resource;
    }
private void refreshAMIsUsedByInstance() {
usedByInstance.clear();
for (String region : regions) {
LOGGER.info(String.format("Getting AMIs used by instances in region %s", region));
String url = eddaClient.getBaseUrl(region) + "/view/instances/;_expand:(imageId)";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for AMIs used by instances in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
url, jsonNode));
}
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode img = it.next();
String id = img.get("imageId").getTextValue();
usedByInstance.add(id);
usedNames.add(imageIdToName.get(id));
}
}
LOGGER.info(String.format("Found %d image ids used by instance from Edda", usedByInstance.size()));
}
private void refreshAMIsUsedByLC() {
usedByLaunchConfig.clear();
for (String region : regions) {
LOGGER.info(String.format("Getting AMIs used by launch configs in region %s", region));
String url = eddaClient.getBaseUrl(region) + "/aws/launchConfigurations;_expand:(imageId)";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for AMIs used by launch configs in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
url, jsonNode));
}
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode img = it.next();
String id = img.get("imageId").getTextValue();
usedByLaunchConfig.add(id);
usedNames.add(imageIdToName.get(id));
}
}
LOGGER.info(String.format("Found %d image ids used by launch config from Edda", usedByLaunchConfig.size()));
}
private void addLastReferenceInfo(List<Resource> resources, long since) {
Validate.notNull(resources);
LOGGER.info(String.format("Updating the latest reference info for %d images", resources.size()));
Map<String, List<Resource>> regionToResources = Maps.newHashMap();
for (Resource resource : resources) {
List<Resource> regionalList = regionToResources.get(resource.getRegion());
if (regionalList == null) {
regionalList = Lists.newArrayList();
regionToResources.put(resource.getRegion(), regionalList);
}
regionalList.add(resource);
}
//
for (Map.Entry<String, List<Resource>> entry : regionToResources.entrySet()) {
String region = entry.getKey();
LOGGER.info(String.format("Updating the latest reference info for %d images in region %s",
resources.size(), region));
for (List<Resource> batch : Lists.partition(entry.getValue(), BATCH_SIZE)) {
LOGGER.info(String.format("Processing batch of size %d", batch.size()));
updateReferenceTimeByInstance(region, batch, since);
updateReferenceTimeByLaunchConfig(region, batch, since);
}
}
}
    /**
     * Updates {@code AMI_FIELD_LAST_INSTANCE_REF_TIME} on each image in the batch
     * with the latest ltime of a terminated instance referencing that image,
     * as recorded in Edda history.
     *
     * @param region the region to query
     * @param batch the image resources to update (looked up by image id)
     * @param since epoch millis lower bound for the Edda history lookup
     */
    private void updateReferenceTimeByInstance(String region, List<Resource> batch, long since) {
        LOGGER.info(String.format("Getting the last reference time by instance for batch of size %d", batch.size()));
        String batchUrl = getInstanceBatchUrl(region, batch, since);
        JsonNode batchResult = null;
        // Index the batch by image id for quick lookup when scanning results.
        Map<String, Resource> idToResource = Maps.newHashMap();
        for (Resource resource : batch) {
            idToResource.put(resource.getId(), resource);
        }
        try {
            batchResult = eddaClient.getJsonNodeFromUrl(batchUrl);
        } catch (IOException e) {
            LOGGER.error("Failed to get response for the batch.", e);
        }
        if (batchResult == null || !batchResult.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
                    batchUrl, batchResult));
        }
        for (Iterator<JsonNode> it = batchResult.getElements(); it.hasNext();) {
            JsonNode elem = it.next();
            JsonNode data = elem.get("data");
            String imageId = data.get("imageId").getTextValue();
            String instanceId = data.get("instanceId").getTextValue();
            JsonNode ltimeNode = elem.get("ltime");
            // Keep the maximum ltime seen per image.
            if (ltimeNode != null && !ltimeNode.isNull()) {
                long ltime = ltimeNode.asLong();
                Resource ami = idToResource.get(imageId);
                String lastRefTimeByInstance = ami.getAdditionalField(
                        AMI_FIELD_LAST_INSTANCE_REF_TIME);
                if (lastRefTimeByInstance == null || Long.parseLong(lastRefTimeByInstance) < ltime) {
                    LOGGER.info(String.format("The last time that the image %s was referenced by instance %s is %d",
                            imageId, instanceId, ltime));
                    ami.setAdditionalField(AMI_FIELD_LAST_INSTANCE_REF_TIME, String.valueOf(ltime));
                }
            }
        }
    }
private void updateReferenceTimeByLaunchConfig(String region, List<Resource> batch, long since) {
LOGGER.info(String.format("Getting the last reference time by launch config for batch of size %d",
batch.size()));
String batchUrl = getLaunchConfigBatchUrl(region, batch, since);
JsonNode batchResult = null;
Map<String, Resource> idToResource = Maps.newHashMap();
for (Resource resource : batch) {
idToResource.put(resource.getId(), resource);
}
try {
batchResult = eddaClient.getJsonNodeFromUrl(batchUrl);
} catch (IOException e) {
LOGGER.error("Failed to get response for the batch.", e);
}
if (batchResult == null || !batchResult.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
batchUrl, batchResult));
}
for (Iterator<JsonNode> it = batchResult.getElements(); it.hasNext();) {
JsonNode elem = it.next();
JsonNode data = elem.get("data");
String imageId = data.get("imageId").getTextValue();
String launchConfigurationName = data.get("launchConfigurationName").getTextValue();
JsonNode ltimeNode = elem.get("ltime");
if (ltimeNode != null && !ltimeNode.isNull()) {
long ltime = ltimeNode.asLong();
Resource ami = idToResource.get(imageId);
String lastRefTimeByLC = ami.getAdditionalField(AMI_FIELD_LAST_LC_REF_TIME);
if (lastRefTimeByLC == null || Long.parseLong(lastRefTimeByLC) < ltime) {
LOGGER.info(String.format(
"The last time that the image %s was referenced by launch config %s is %d",
imageId, launchConfigurationName, ltime));
ami.setAdditionalField(AMI_FIELD_LAST_LC_REF_TIME, String.valueOf(ltime));
}
}
}
}
private String getInstanceBatchUrl(String region, List<Resource> batch, long since) {
StringBuilder batchUrl = new StringBuilder(eddaClient.getBaseUrl(region)
+ "/view/instances/;data.imageId=");
batchUrl.append(getImageIdsString(batch));
batchUrl.append(String.format(";data.state.name=terminated;_since=%d;_expand;_meta:"
+ "(ltime,data:(imageId,instanceId))", since));
return batchUrl.toString();
}
private String getLaunchConfigBatchUrl(String region, List<Resource> batch, long since) {
StringBuilder batchUrl = new StringBuilder(eddaClient.getBaseUrl(region)
+ "/aws/launchConfigurations/;data.imageId=");
batchUrl.append(getImageIdsString(batch));
batchUrl.append(String.format(";_since=%d;_expand;_meta:(ltime,data:(imageId,launchConfigurationName))",
since));
return batchUrl.toString();
}
private String getImageIdsString(List<Resource> resources) {
StringBuilder sb = new StringBuilder();
boolean isFirst = true;
for (Resource resource : resources) {
if (!isFirst) {
sb.append(',');
} else {
isFirst = false;
}
sb.append(resource.getId());
}
return sb.toString();
}
private static String getBaseAmiIdFromDescription(String imageDescription) {
// base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210.10.44
Matcher matcher = BASE_AMI_ID_PATTERN.matcher(imageDescription);
if (matcher.matches()) {
return matcher.group(1);
}
// store=ebs,ancestor_name=ebs-centosbase-x86_64-20101124,ancestor_id=ami-7b4eb912
matcher = ANCESTOR_ID_PATTERN.matcher(imageDescription);
if (matcher.matches()) {
return matcher.group(1);
}
return null;
}
}
| 4,861 |
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* The crawler to crawl AWS auto scaling groups for janitor monkey using Edda.
*/
public class EddaASGJanitorCrawler implements JanitorCrawler {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(EddaASGJanitorCrawler.class);
/** The name representing the additional field name of instance ids. */
public static final String ASG_FIELD_INSTANCES = "INSTANCES";
/** The name representing the additional field name of max ASG size. */
public static final String ASG_FIELD_MAX_SIZE = "MAX_SIZE";
/** The name representing the additional field name of ELB names. */
public static final String ASG_FIELD_ELBS = "ELBS";
/** The name representing the additional field name of launch configuration name. */
public static final String ASG_FIELD_LC_NAME = "LAUNCH_CONFIGURATION_NAME";
/** The name representing the additional field name of launch configuration creation time. */
public static final String ASG_FIELD_LC_CREATION_TIME = "LAUNCH_CONFIGURATION_CREATION_TIME";
/** The name representing the additional field name of ASG suspension time from ELB. */
public static final String ASG_FIELD_SUSPENSION_TIME = "ASG_SUSPENSION_TIME";
/** The name representing the additional field name of ASG's last change/activity time. */
public static final String ASG_FIELD_LAST_CHANGE_TIME = "ASG_LAST_CHANGE_TIME";
/** The regular expression patter below is for the termination reason added by AWS when
* an ASG is suspended from ELB's traffic.
*/
private static final Pattern SUSPENSION_REASON_PATTERN =
Pattern.compile("User suspended at (\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}).*");
private final EddaClient eddaClient;
private final List<String> regions = Lists.newArrayList();
private final Map<String, Map<String, Long>> regionToAsgToLastChangeTime = Maps.newHashMap();
/**
* Instantiates a new basic ASG crawler.
* @param eddaClient
* the Edda client
* @param regions
* the regions the crawler will crawl resources for
*/
public EddaASGJanitorCrawler(EddaClient eddaClient, String... regions) {
Validate.notNull(eddaClient);
this.eddaClient = eddaClient;
Validate.notNull(regions);
for (String region : regions) {
this.regions.add(region);
}
}
    /** This crawler handles only auto scaling groups. */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ASG);
    }
    /** Returns all ASG resources for the given type; any other type yields an empty list. */
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ASG".equals(resourceType.name())) {
            return getASGResources();
        }
        return Collections.emptyList();
    }
    /** Returns the ASG resources with the given names; when empty, all ASGs are returned. */
    @Override
    public List<Resource> resources(String... asgNames) {
        return getASGResources(asgNames);
    }
    /**
     * Returns the owner email from the global owner tag on the ASG;
     * may be null when the tag is absent.
     */
    @Override
    public String getOwnerEmailForResource(Resource resource) {
        Validate.notNull(resource);
        return resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
    }
    /**
     * Crawls ASG resources across all configured regions. The cached last-change times
     * are refreshed first so each resource can be annotated with them.
     */
    private List<Resource> getASGResources(String... asgNames) {
        refreshAsgLastChangeTime();
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getASGResourcesInRegion(region, asgNames));
        }
        return resources;
    }
private List<Resource> getASGResourcesInRegion(String region, String... asgNames) {
String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups;";
if (asgNames != null && asgNames.length != 0) {
url += StringUtils.join(asgNames, ',');
LOGGER.info(String.format("Getting ASGs in region %s for %d ids", region, asgNames.length));
} else {
LOGGER.info(String.format("Getting all ASGs in region %s", region));
}
url += ";_expand:(autoScalingGroupName,createdTime,maxSize,suspendedProcesses:(processName,suspensionReason),"
+ "tags:(key,value),instances:(instanceId),loadBalancerNames,launchConfigurationName)";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for ASGs in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
}
Map<String, Long> lcNameToCreationTime = getLaunchConfigCreationTimes(region);
List<Resource> resources = Lists.newArrayList();
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
resources.add(parseJsonElementToresource(region, it.next(), lcNameToCreationTime));
}
return resources;
}
    /**
     * Converts a single Edda JSON document for an auto scaling group into an AWS resource.
     * Populates tags, owner email, max size, member instances, attached ELBs, launch
     * configuration name/creation time, ELB-traffic suspension time, and last change time
     * as additional fields on the resource.
     *
     * @param region the region the ASG belongs to
     * @param jsonNode the Edda JSON document for one ASG
     * @param lcNameToCreationTime map from launch configuration name to creation time
     * @return the populated ASG resource
     */
    private Resource parseJsonElementToresource(String region, JsonNode jsonNode
        , Map<String, Long> lcNameToCreationTime) {
        Validate.notNull(jsonNode);
        String asgName = jsonNode.get("autoScalingGroupName").getTextValue();
        long createdTime = jsonNode.get("createdTime").getLongValue();
        Resource resource = new AWSResource().withId(asgName).withRegion(region)
                .withResourceType(AWSResourceType.ASG)
                .withLaunchTime(new Date(createdTime));
        // Copy every ASG tag onto the resource; tags drive ownership and opt-out logic.
        JsonNode tags = jsonNode.get("tags");
        if (tags == null || !tags.isArray() || tags.size() == 0) {
            LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
        } else {
            for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
                JsonNode tag = it.next();
                String key = tag.get("key").getTextValue();
                String value = tag.get("value").getTextValue();
                resource.setTag(key, value);
            }
        }
        // Owner must be resolved after tags are set, since it is read from a tag.
        String owner = getOwnerEmailForResource(resource);
        if (owner != null) {
            resource.setOwnerEmail(owner);
        }
        JsonNode maxSize = jsonNode.get("maxSize");
        if (maxSize != null) {
            resource.setAdditionalField(ASG_FIELD_MAX_SIZE, String.valueOf(maxSize.getIntValue()));
        }
        // Adds instances and ELBs as additional fields.
        JsonNode instances = jsonNode.get("instances");
        resource.setDescription(String.format("%d instances", instances.size()));
        List<String> instanceIds = Lists.newArrayList();
        for (Iterator<JsonNode> it = instances.getElements(); it.hasNext();) {
            instanceIds.add(it.next().get("instanceId").getTextValue());
        }
        resource.setAdditionalField(ASG_FIELD_INSTANCES, StringUtils.join(instanceIds, ","));
        JsonNode elbs = jsonNode.get("loadBalancerNames");
        List<String> elbNames = Lists.newArrayList();
        for (Iterator<JsonNode> it = elbs.getElements(); it.hasNext();) {
            elbNames.add(it.next().getTextValue());
        }
        resource.setAdditionalField(ASG_FIELD_ELBS, StringUtils.join(elbNames, ","));
        // Record the launch configuration name and, when known, its creation time.
        JsonNode lc = jsonNode.get("launchConfigurationName");
        if (lc != null) {
            String lcName = lc.getTextValue();
            Long lcCreationTime = lcNameToCreationTime.get(lcName);
            if (lcName != null) {
                resource.setAdditionalField(ASG_FIELD_LC_NAME, lcName);
            }
            if (lcCreationTime != null) {
                resource.setAdditionalField(ASG_FIELD_LC_CREATION_TIME, String.valueOf(lcCreationTime));
            }
        }
        // sets the field for the time when the ASG's traffic is suspended from ELB
        JsonNode suspendedProcesses = jsonNode.get("suspendedProcesses");
        for (Iterator<JsonNode> it = suspendedProcesses.getElements(); it.hasNext();) {
            JsonNode sp = it.next();
            if ("AddToLoadBalancer".equals(sp.get("processName").getTextValue())) {
                String suspensionTime = getSuspensionTimeString(sp.get("suspensionReason").getTextValue());
                if (suspensionTime != null) {
                    LOGGER.info(String.format("Suspension time of ASG %s is %s",
                            asgName, suspensionTime));
                    resource.setAdditionalField(ASG_FIELD_SUSPENSION_TIME, suspensionTime);
                    break;
                }
            }
        }
        // refreshAsgLastChangeTime() populates an entry for every region before this runs.
        Long lastChangeTime = regionToAsgToLastChangeTime.get(region).get(asgName);
        if (lastChangeTime != null) {
            resource.setAdditionalField(ASG_FIELD_LAST_CHANGE_TIME, String.valueOf(lastChangeTime));
        }
        return resource;
    }
private Map<String, Long> getLaunchConfigCreationTimes(String region) {
LOGGER.info(String.format("Getting launch configuration creation times in region %s", region));
String url = eddaClient.getBaseUrl(region)
+ "/aws/launchConfigurations;_expand:(launchConfigurationName,createdTime)";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for lc creation times in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
}
Map<String, Long> nameToCreationTime = Maps.newHashMap();
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode elem = it.next();
nameToCreationTime.put(elem.get("launchConfigurationName").getTextValue(),
elem.get("createdTime").getLongValue());
}
return nameToCreationTime;
}
private String getSuspensionTimeString(String suspensionReason) {
if (suspensionReason == null) {
return null;
}
Matcher matcher = SUSPENSION_REASON_PATTERN.matcher(suspensionReason);
if (matcher.matches()) {
return matcher.group(1);
}
return null;
}
private void refreshAsgLastChangeTime() {
regionToAsgToLastChangeTime.clear();
for (String region : regions) {
LOGGER.info(String.format("Getting ASG last change time in region %s", region));
Map<String, Long> asgToLastChangeTime = regionToAsgToLastChangeTime.get(region);
if (asgToLastChangeTime == null) {
asgToLastChangeTime = Maps.newHashMap();
regionToAsgToLastChangeTime.put(region, asgToLastChangeTime);
}
String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups;"
+ ";_expand;_meta:(stime,data:(autoScalingGroupName))";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for ASG last change time in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s",
url, jsonNode));
}
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
JsonNode asg = it.next();
String asgName = asg.get("data").get("autoScalingGroupName").getTextValue();
Long lastChangeTime = asg.get("stime").asLong();
LOGGER.debug(String.format("The last change time of ASG %s is %s", asgName,
new DateTime(lastChangeTime)));
asgToLastChangeTime.put(asgName, lastChangeTime);
}
}
}
}
| 4,862 |
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.crawler.edda;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.ResourceType;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.AWSResourceType;
import com.netflix.simianarmy.basic.BasicSimianArmyContext;
import com.netflix.simianarmy.client.edda.EddaClient;
import com.netflix.simianarmy.janitor.JanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.codehaus.jackson.JsonNode;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* The crawler to crawl AWS instances for janitor monkey using Edda.
*/
public class EddaELBJanitorCrawler implements JanitorCrawler {
class DNSEntry {
String dnsName;
String dnsType;
String hostedZoneId;
};
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(EddaELBJanitorCrawler.class);
private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.S'Z'");
private final EddaClient eddaClient;
private final List<String> regions = Lists.newArrayList();
private final boolean useEddaApplicationOwner;
private final String fallbackOwnerEmail;
private Map<String, String> applicationToOwner = new HashMap<String, String>();
/**
* Instantiates a new basic instance crawler.
* @param eddaClient
* the Edda client
* @param regions
* the regions the crawler will crawl resources for
*/
public EddaELBJanitorCrawler(EddaClient eddaClient, String fallbackOwnerEmail, boolean useEddaApplicationOwner, String... regions) {
this.useEddaApplicationOwner = useEddaApplicationOwner;
this.fallbackOwnerEmail = fallbackOwnerEmail;
Validate.notNull(eddaClient);
this.eddaClient = eddaClient;
Validate.notNull(regions);
for (String region : regions) {
this.regions.add(region);
}
}
    /** This crawler handles only elastic load balancers. */
    @Override
    public EnumSet<? extends ResourceType> resourceTypes() {
        return EnumSet.of(AWSResourceType.ELB);
    }
    /** Returns all ELB resources for the given type; any other type yields an empty list. */
    @Override
    public List<Resource> resources(ResourceType resourceType) {
        if ("ELB".equals(resourceType.name())) {
            return getELBResources();
        }
        return Collections.emptyList();
    }
    /** Returns the ELB resources with the given names; when empty, all ELBs are returned. */
    @Override
    public List<Resource> resources(String... resourceIds) {
        return getELBResources(resourceIds);
    }
@Override
public String getOwnerEmailForResource(Resource resource) {
Validate.notNull(resource);
String ownerEmail = null;
if (useEddaApplicationOwner) {
for (String app : applicationToOwner.keySet()) {
if (resource.getId().toLowerCase().startsWith(app)) {
ownerEmail = applicationToOwner.get(app);
break;
}
}
} else {
ownerEmail = resource.getTag(BasicSimianArmyContext.GLOBAL_OWNER_TAGKEY);
}
if (ownerEmail == null) {
ownerEmail = fallbackOwnerEmail;
}
return ownerEmail;
}
    /**
     * Crawls ELB resources across all configured regions. When Edda application ownership
     * is enabled, the application-to-owner mapping is refreshed first.
     */
    private List<Resource> getELBResources(String... instanceIds) {
        if (useEddaApplicationOwner) {
            applicationToOwner = EddaUtils.getAllApplicationOwnerEmails(eddaClient);
        }
        List<Resource> resources = Lists.newArrayList();
        for (String region : regions) {
            resources.addAll(getELBResourcesInRegion(region, instanceIds));
        }
        return resources;
    }
private List<Resource> getELBResourcesInRegion(String region, String... elbNames) {
String url = eddaClient.getBaseUrl(region) + "/aws/loadBalancers";
if (elbNames != null && elbNames.length != 0) {
url += StringUtils.join(elbNames, ',');
LOGGER.info(String.format("Getting ELBs in region %s for %d names", region, elbNames.length));
} else {
LOGGER.info(String.format("Getting all ELBs in region %s", region));
}
url += ";_expand:(loadBalancerName,createdTime,DNSName,instances,tags:(key,value))";
JsonNode jsonNode = null;
try {
jsonNode = eddaClient.getJsonNodeFromUrl(url);
} catch (Exception e) {
LOGGER.error(String.format(
"Failed to get Jason node from edda for ELBs in region %s.", region), e);
}
if (jsonNode == null || !jsonNode.isArray()) {
throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
}
List<Resource> resources = Lists.newArrayList();
for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
resources.add(parseJsonElementToELBResource(region, it.next()));
}
Map<String, List<String>> elBtoASGMap = buildELBtoASGMap(region);
for(Resource resource : resources) {
List<String> asgList = elBtoASGMap.get(resource.getId());
if (asgList != null && asgList.size() > 0) {
resource.setAdditionalField("referencedASGCount", "" + asgList.size());
String asgStr = StringUtils.join(asgList,",");
resource.setDescription(resource.getDescription() + ", ASGS=" + asgStr);
LOGGER.debug(String.format("Resource ELB %s is referenced by ASGs %s", resource.getId(), asgStr));
} else {
resource.setAdditionalField("referencedASGCount", "0");
resource.setDescription(resource.getDescription() + ", ASGS=none");
LOGGER.debug(String.format("No ASGs found for ELB %s", resource.getId()));
}
}
Map<String, List<DNSEntry>> elBtoDNSMap = buildELBtoDNSMap(region);
for(Resource resource : resources) {
List<DNSEntry> dnsEntryList = elBtoDNSMap.get(resource.getAdditionalField("DNSName"));
if (dnsEntryList != null && dnsEntryList.size() > 0) {
ArrayList<String> dnsNames = new ArrayList<>();
ArrayList<String> dnsTypes = new ArrayList<>();
ArrayList<String> hostedZoneIds = new ArrayList<>();
for (DNSEntry dnsEntry : dnsEntryList) {
dnsNames.add(dnsEntry.dnsName);
dnsTypes.add(dnsEntry.dnsType);
hostedZoneIds.add(dnsEntry.hostedZoneId);
}
resource.setAdditionalField("referencedDNS", StringUtils.join(dnsNames,","));
resource.setAdditionalField("referencedDNSTypes", StringUtils.join(dnsTypes,","));
resource.setAdditionalField("referencedDNSZones", StringUtils.join(hostedZoneIds,","));
resource.setDescription(resource.getDescription() + ", DNS=" + resource.getAdditionalField("referencedDNS"));
LOGGER.debug(String.format("Resource ELB %s is referenced by DNS %s", resource.getId(), resource.getAdditionalField("referencedDNS")));
} else {
resource.setAdditionalField("referencedDNS", "");
resource.setDescription(resource.getDescription() + ", DNS=none");
LOGGER.debug(String.format("No DNS found for ELB %s", resource.getId()));
}
}
return resources;
}
    /**
     * Builds a map from ELB name to the list of ASG names that reference it, by querying
     * Edda for every ASG's loadBalancerNames in the region.
     *
     * @param region the region to query
     * @return map from ELB name to referencing ASG names; ELBs with no ASGs are absent
     */
    private Map<String, List<String>> buildELBtoASGMap(String region) {
        String url = eddaClient.getBaseUrl(region) + "/aws/autoScalingGroups;_expand:(autoScalingGroupName,loadBalancerNames)";
        LOGGER.info(String.format("Getting all ELBs associated with ASGs in region %s", region));
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for ASG ELBs in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        HashMap<String, List<String>> asgMap = new HashMap<>();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode asgNode = it.next();
            String asgName = asgNode.get("autoScalingGroupName").getTextValue();
            JsonNode elbs = asgNode.get("loadBalancerNames");
            // Skip ASGs with no attached load balancers.
            if (elbs == null || !elbs.isArray() || elbs.size() == 0) {
                continue;
            } else {
                for (Iterator<JsonNode> elbNode = elbs.getElements(); elbNode.hasNext();) {
                    JsonNode elb = elbNode.next();
                    String elbName = elb.getTextValue();
                    // Lazily create the per-ELB list of referencing ASGs.
                    List<String> asgList = asgMap.get(elbName);
                    if (asgList == null) {
                        asgList = new ArrayList<>();
                        asgMap.put(elbName, asgList);
                    }
                    asgList.add(asgName);
                    LOGGER.debug(String.format("Found ASG %s associated with ELB %s", asgName, elbName));
                }
            }
        }
        return asgMap;
    }
private Resource parseJsonElementToELBResource(String region, JsonNode jsonNode) {
Validate.notNull(jsonNode);
String elbName = jsonNode.get("loadBalancerName").getTextValue();
long launchTime = jsonNode.get("createdTime").getLongValue();
Resource resource = new AWSResource().withId(elbName).withRegion(region)
.withResourceType(AWSResourceType.ELB)
.withLaunchTime(new Date(launchTime));
String dnsName = jsonNode.get("DNSName").getTextValue();
resource.setAdditionalField("DNSName", dnsName);
JsonNode tags = jsonNode.get("tags");
if (tags == null || !tags.isArray() || tags.size() == 0) {
LOGGER.debug(String.format("No tags is found for %s", resource.getId()));
} else {
for (Iterator<JsonNode> it = tags.getElements(); it.hasNext();) {
JsonNode tag = it.next();
String key = tag.get("key").getTextValue();
String value = tag.get("value").getTextValue();
resource.setTag(key, value);
}
}
String owner = getOwnerEmailForResource(resource);
if (owner != null) {
resource.setOwnerEmail(owner);
}
LOGGER.debug(String.format("Owner of ELB Resource %s (ELB DNS: %s) is %s", resource.getId(), resource.getAdditionalField("DNSName"), resource.getOwnerEmail()));
JsonNode instances = jsonNode.get("instances");
if (instances == null || !instances.isArray() || instances.size() == 0) {
resource.setAdditionalField("instanceCount", "0");
resource.setDescription("instances=none");
LOGGER.debug(String.format("No instances found for ELB %s", resource.getId()));
} else {
resource.setAdditionalField("instanceCount", "" + instances.size());
ArrayList<String> instanceList = new ArrayList<String>(instances.size());
LOGGER.debug(String.format("Found %d instances for ELB %s", instances.size(), resource.getId()));
for (Iterator<JsonNode> it = instances.getElements(); it.hasNext();) {
JsonNode instance = it.next();
String instanceId = instance.get("instanceId").getTextValue();
instanceList.add(instanceId);
}
String instancesStr = StringUtils.join(instanceList,",");
resource.setDescription(String.format("instances=%s", instances));
LOGGER.debug(String.format("Resource ELB %s has instances %s", resource.getId(), instancesStr));
}
return resource;
}
    /**
     * Builds a map from an ELB's DNS name to the Route53 records that point at it, by
     * scanning the hosted records in Edda. Both alias targets and plain resource records
     * are considered; a record is treated as ELB-related when its target contains ".elb.".
     *
     * @param region the region to query
     * @return map from ELB DNS name to the DNS entries referencing it
     */
    private Map<String, List<DNSEntry>> buildELBtoDNSMap(String region) {
        String url = eddaClient.getBaseUrl(region) + "/aws/hostedRecords;_expand:(name,type,aliasTarget,resourceRecords:(value),zone:(id))";
        LOGGER.info(String.format("Getting all ELBs associated with DNSs in region %s", region));
        JsonNode jsonNode = null;
        try {
            jsonNode = eddaClient.getJsonNodeFromUrl(url);
        } catch (Exception e) {
            LOGGER.error(String.format(
                    "Failed to get JSON node from edda for DNS ELBs in region %s.", region), e);
        }
        if (jsonNode == null || !jsonNode.isArray()) {
            throw new RuntimeException(String.format("Failed to get valid document from %s, got: %s", url, jsonNode));
        }
        HashMap<String, List<DNSEntry>> dnsMap = new HashMap<>();
        for (Iterator<JsonNode> it = jsonNode.getElements(); it.hasNext();) {
            JsonNode dnsNode = it.next();
            String dnsName = dnsNode.get("name").getTextValue();
            String dnsType = dnsNode.get("type").getTextValue();
            // The hosted zone id is optional in the Edda document.
            String hostedZoneId = null;
            JsonNode hostedZoneNode = dnsNode.get("zone");
            if (hostedZoneNode != null) {
                JsonNode hostedZoneIdNode = hostedZoneNode.get("id");
                if (hostedZoneIdNode != null) {
                    hostedZoneId = hostedZoneIdNode.getTextValue();
                }
            }
            // Case 1: alias records whose target DNS name looks like an ELB.
            JsonNode aliasTarget = dnsNode.get("aliasTarget");
            if (aliasTarget != null) {
                JsonNode aliasTargetDnsNameNode = aliasTarget.get("DNSName");
                if (aliasTargetDnsNameNode != null) {
                    String aliasTargetDnsName = aliasTargetDnsNameNode.getTextValue();
                    if (aliasTargetDnsName != null && aliasTargetDnsName.contains(".elb.")) {
                        DNSEntry dnsEntry = new DNSEntry();
                        dnsEntry.dnsName = dnsName;
                        dnsEntry.dnsType = dnsType;
                        dnsEntry.hostedZoneId = hostedZoneId;
                        // Strip the trailing dot of a fully-qualified name so it matches
                        // the ELB's DNSName field.
                        if (aliasTargetDnsName.endsWith(".")) {
                            aliasTargetDnsName = aliasTargetDnsName.substring(0, aliasTargetDnsName.length()-1);
                        }
                        List<DNSEntry> dnsEntryList = dnsMap.get(aliasTargetDnsName);
                        if (dnsEntryList == null) {
                            dnsEntryList = new ArrayList<>();
                            dnsMap.put(aliasTargetDnsName, dnsEntryList);
                        }
                        dnsEntryList.add(dnsEntry);
                        LOGGER.debug(String.format("Found DNS %s (alias) associated with ELB DNS %s, type %s, zone %s", dnsName, aliasTargetDnsName, dnsType, hostedZoneId));
                    }
                }
            }
            // Case 2: plain resource records whose value looks like an ELB DNS name.
            JsonNode records = dnsNode.get("resourceRecords");
            if (records == null || !records.isArray() || records.size() == 0) {
                continue;
            } else {
                for (Iterator<JsonNode> recordNode = records.getElements(); recordNode.hasNext();) {
                    JsonNode record = recordNode.next();
                    String elbDNS = record.get("value").getTextValue();
                    if (elbDNS.contains(".elb.")) {
                        DNSEntry dnsEntry = new DNSEntry();
                        dnsEntry.dnsName = dnsName;
                        dnsEntry.dnsType = dnsType;
                        dnsEntry.hostedZoneId = hostedZoneId;
                        List<DNSEntry> dnsEntryList = dnsMap.get(elbDNS);
                        if (dnsEntryList == null) {
                            dnsEntryList = new ArrayList<>();
                            dnsMap.put(elbDNS, dnsEntryList);
                        }
                        dnsEntryList.add(dnsEntry);
                        LOGGER.debug(String.format("Found DNS %s associated with ELB DNS %s, type %s, zone %s", dnsName, elbDNS, dnsType, hostedZoneId));
                    }
                }
            }
        }
        return dnsMap;
    }
}
| 4,863 |
/*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.ami;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.edda.EddaImageJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
* The rule class to clean up images that are not used.
*/
public class UnusedImageRule implements Rule {
/** The Constant LOGGER. */
private static final Logger LOGGER = LoggerFactory.getLogger(UnusedImageRule.class);
private final MonkeyCalendar calendar;
private final int retentionDays;
private final int lastReferenceDaysThreshold;
/**
* Constructor.
*
* @param calendar
* The calendar used to calculate the termination time
* @param retentionDays
* The number of days that the marked ASG is retained before being terminated
* @param lastReferenceDaysThreshold
* The number of days that the image has been not referenced that makes the ASG be
* considered obsolete
*/
    public UnusedImageRule(MonkeyCalendar calendar, int retentionDays, int lastReferenceDaysThreshold) {
        Validate.notNull(calendar);
        // Negative day counts make no sense for either the retention or the reference window.
        Validate.isTrue(retentionDays >= 0);
        Validate.isTrue(lastReferenceDaysThreshold >= 0);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
        this.lastReferenceDaysThreshold = lastReferenceDaysThreshold;
    }
@Override
public boolean isValid(Resource resource) {
Validate.notNull(resource);
if (!"IMAGE".equals(resource.getResourceType().name())) {
return true;
}
if (!"available".equals(((AWSResource) resource).getAWSResourceState())) {
return true;
}
if ("true".equals(resource.getAdditionalField(EddaImageJanitorCrawler.AMI_FIELD_BASE_IMAGE))) {
LOGGER.info(String.format("Image %s is a base image that is used to create other images",
resource.getId()));
return true;
}
long instanceRefTime = getRefTimeInMilis(resource, EddaImageJanitorCrawler.AMI_FIELD_LAST_INSTANCE_REF_TIME);
long lcRefTime = getRefTimeInMilis(resource, EddaImageJanitorCrawler.AMI_FIELD_LAST_LC_REF_TIME);
Date now = calendar.now().getTime();
long windowStart = new DateTime(now.getTime()).minusDays(lastReferenceDaysThreshold).getMillis();
if (instanceRefTime < windowStart && lcRefTime < windowStart) {
if (resource.getExpectedTerminationTime() == null) {
Date terminationTime = calendar.getBusinessDay(now, retentionDays);
resource.setExpectedTerminationTime(terminationTime);
resource.setTerminationReason(String.format("Image not referenced for %d days",
lastReferenceDaysThreshold + retentionDays));
LOGGER.info(String.format(
"Image %s in region %s is marked to be cleaned at %s as it is not referenced"
+ "for more than %d days",
resource.getId(), resource.getRegion(), resource.getExpectedTerminationTime(),
lastReferenceDaysThreshold));
} else {
LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
}
return false;
}
return true;
}
    /**
     * Tries to get the long value from the provided field. If the field does not exist, the
     * image's creation (launch) time is used instead; if that is also missing, the current
     * time is used, which effectively treats the image as still referenced.
     *
     * @param resource the image resource
     * @param field the additional-field name holding an epoch-millisecond timestamp
     * @return the reference time in epoch milliseconds
     */
    private long getRefTimeInMilis(Resource resource, String field) {
        String fieldValue = resource.getAdditionalField(field);
        long refTime;
        if (fieldValue != null) {
            refTime = Long.parseLong(fieldValue);
        } else if (resource.getLaunchTime() != null) {
            LOGGER.info(String.format("No value in field %s is found, use the creation time %s as the ref time of %s",
                    field, resource.getLaunchTime(), resource.getId()));
            refTime = resource.getLaunchTime().getTime();
        } else {
            // When there is no creation time or ref time is found, we consider the image is referenced.
            LOGGER.info(String.format("Use the current time as the ref time of %s", resource.getId()));
            refTime = DateTime.now().getMillis();
        }
        return refTime;
    }
}
| 4,864 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/snapshot/NoGeneratedAMIRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.snapshot;
import java.util.Date;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.EBSSnapshotJanitorCrawler;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import com.netflix.simianarmy.janitor.Rule;
/**
* The rule is for checking whether an EBS snapshot has any AMIs generated from it.
* If there are no AMIs generated using the snapshot and the snapshot is created
* for certain days, it is marked as a cleanup candidate by this rule.
*/
public class NoGeneratedAMIRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(NoGeneratedAMIRule.class);

    /** When non-null, notifications are sent here instead of to the resource owner. */
    private String ownerEmailOverride = null;

    private static final String TERMINATION_REASON = "No AMI is generated for this snapshot";

    /** The calendar used to calculate the termination time. */
    private final MonkeyCalendar calendar;

    /** The number of days since creation before a snapshot becomes a cleanup candidate. */
    private final int ageThreshold;

    /** The number of days a marked snapshot is retained before termination. */
    private final int retentionDays;

    /** The date format used to print or parse the user specified termination date. **/
    public static final DateTimeFormatter TERMINATION_DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd");

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param ageThreshold
     *            The number of days that a snapshot is considered as cleanup candidate since it is created
     * @param retentionDays
     *            The number of days that the volume is retained before being terminated after being marked
     *            as cleanup candidate
     */
    public NoGeneratedAMIRule(MonkeyCalendar calendar, int ageThreshold, int retentionDays) {
        this(calendar, ageThreshold, retentionDays, null);
    }

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param ageThreshold
     *            The number of days that a snapshot is considered as cleanup candidate since it is created
     * @param retentionDays
     *            The number of days that the volume is retained before being terminated after being marked
     *            as cleanup candidate
     * @param ownerEmailOverride
     *            If null, send notifications to the resource owner.
     *            If not null, send notifications to the provided owner email address instead of the resource owner.
     */
    public NoGeneratedAMIRule(MonkeyCalendar calendar, int ageThreshold, int retentionDays,
            String ownerEmailOverride) {
        Validate.notNull(calendar);
        Validate.isTrue(ageThreshold >= 0);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.ageThreshold = ageThreshold;
        this.retentionDays = retentionDays;
        this.ownerEmailOverride = ownerEmailOverride;
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        // Only completed EBS snapshots are subject to this rule.
        if (!resource.getResourceType().name().equals("EBS_SNAPSHOT")) {
            return true;
        }
        if (!"completed".equals(((AWSResource) resource).getAWSResourceState())) {
            return true;
        }
        String janitorTag = resource.getTag(JanitorMonkey.JANITOR_TAG);
        if (janitorTag != null) {
            if ("donotmark".equals(janitorTag)) {
                LOGGER.info(String.format("The snapshot %s is tagged as not handled by Janitor",
                        resource.getId()));
                return true;
            }
            try {
                // Owners can tag the volume with a termination date in the "janitor" tag.
                Date userSpecifiedDate = new Date(TERMINATION_DATE_FORMATTER.parseDateTime(janitorTag).getMillis());
                resource.setExpectedTerminationTime(userSpecifiedDate);
                resource.setTerminationReason(String.format("User specified termination date %s", janitorTag));
                if (ownerEmailOverride != null) {
                    resource.setOwnerEmail(ownerEmailOverride);
                }
                return false;
            } catch (Exception e) {
                // Pass the exception to the logger so the parse failure's cause is not lost;
                // then fall through to the normal age-based evaluation.
                LOGGER.error(String.format("The janitor tag is not a user specified date: %s", janitorTag), e);
            }
        }
        if (hasGeneratedImage(resource)) {
            return true;
        }
        if (resource.getLaunchTime() == null) {
            LOGGER.error(String.format("Snapshot %s does not have a creation time.", resource.getId()));
            return true;
        }
        DateTime launchTime = new DateTime(resource.getLaunchTime().getTime());
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (launchTime.plusDays(ageThreshold).isBefore(now)) {
            if (ownerEmailOverride != null) {
                resource.setOwnerEmail(ownerEmailOverride);
            }
            if (resource.getExpectedTerminationTime() == null) {
                Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                resource.setExpectedTerminationTime(terminationTime);
                resource.setTerminationReason(TERMINATION_REASON);
                LOGGER.info(String.format(
                        "Snapshot %s is marked to be cleaned at %s as there is no AMI generated using it",
                        resource.getId(), resource.getExpectedTerminationTime()));
            } else {
                LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
            }
            return false;
        }
        return true;
    }

    /**
     * Gets the AMI created using the snapshot. This method can be overridden by subclasses
     * if they use a different way to check this.
     * @param resource the snapshot resource
     * @return true if there are AMIs that are created using the snapshot, false otherwise
     */
    protected boolean hasGeneratedImage(Resource resource) {
        return StringUtils.isNotEmpty(resource.getAdditionalField(EBSSnapshotJanitorCrawler.SNAPSHOT_FIELD_AMIS));
    }
}
| 4,865 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/elb/OrphanedELBRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.elb;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.apache.commons.lang.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
* The rule for checking the orphaned instances that do not belong to any ASGs and
* launched for certain days.
*/
public class OrphanedELBRule implements Rule {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanedELBRule.class);

    private static final String TERMINATION_REASON = "ELB has no instances and is not referenced by any ASG";

    /** Calendar used to compute the business-day termination time. */
    private final MonkeyCalendar calendar;

    /** The number of days a marked ELB is retained before termination. */
    private final int retentionDays;

    /**
     * Constructor for OrphanedELBRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the marked ELB is retained before being terminated
     */
    public OrphanedELBRule(MonkeyCalendar calendar, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
    }

    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!resource.getResourceType().name().equals("ELB")) {
            return true;
        }
        String instanceCountStr = resource.getAdditionalField("instanceCount");
        String refASGCountStr = resource.getAdditionalField("referencedASGCount");
        // Without both counts we cannot safely conclude the ELB is orphaned.
        if (StringUtils.isBlank(instanceCountStr)) {
            LOGGER.info(String.format("Resource %s is missing instance count, not marked as a cleanup candidate.", resource.getId()));
            return true;
        }
        if (StringUtils.isBlank(refASGCountStr)) {
            LOGGER.info(String.format("Resource %s is missing referenced ASG count, not marked as a cleanup candidate.", resource.getId()));
            return true;
        }
        int instanceCount = NumberUtils.toInt(instanceCountStr);
        int refASGCount = NumberUtils.toInt(refASGCountStr);
        if (instanceCount == 0 && refASGCount == 0) {
            LOGGER.info(String.format("Resource %s is marked as cleanup candidate with 0 instances and 0 referenced ASGs (owner: %s).", resource.getId(), resource.getOwnerEmail()));
            markResource(resource);
            return false;
        } else {
            LOGGER.info(String.format("Resource %s is not marked as cleanup candidate with %d instances and %d referenced ASGs.", resource.getId(), instanceCount, refASGCount));
            return true;
        }
    }

    /**
     * Sets the expected termination time and reason on the resource if not already marked.
     */
    private void markResource(Resource resource) {
        if (resource.getExpectedTerminationTime() == null) {
            // Derive "now" from the monkey calendar (not the wall clock) so a simulated or
            // test calendar is honored, consistent with the other janitor rules.
            Date now = new Date(calendar.now().getTimeInMillis());
            Date terminationTime = calendar.getBusinessDay(now, retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(TERMINATION_REASON);
        } else {
            LOGGER.info(String.format("Resource %s is already marked as cleanup candidate.", resource.getId()));
        }
    }
}
| 4,866 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/launchconfig/OldUnusedLaunchConfigRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.launchconfig;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.LaunchConfigJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
* The rule for detecting the launch configurations that
* 1) have been created for certain days and
* 2) are not used by any auto scaling groups.
*/
public class OldUnusedLaunchConfigRule implements Rule {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OldUnusedLaunchConfigRule.class);

    private static final String TERMINATION_REASON = "Launch config is not used by any ASG";

    /** Calendar used to compute the business-day termination time. */
    private final MonkeyCalendar calendar;

    /** Days since creation before an unused launch config becomes a cleanup candidate. */
    private final int ageThreshold;

    /** Days a marked launch config is retained before termination. */
    private final int retentionDays;

    /**
     * Constructor for OldUnusedLaunchConfigRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param ageThreshold
     *            The number of days that a launch configuration is considered as a cleanup candidate
     *            since it is created
     * @param retentionDays
     *            The number of days that the unused launch configuration is retained before being terminated
     */
    public OldUnusedLaunchConfigRule(MonkeyCalendar calendar, int ageThreshold, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(ageThreshold >= 0);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.ageThreshold = ageThreshold;
        this.retentionDays = retentionDays;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!"LAUNCH_CONFIG".equals(resource.getResourceType().name())) {
            return true;
        }
        String usedByASG = ((AWSResource) resource)
                .getAdditionalField(LaunchConfigJanitorCrawler.LAUNCH_CONFIG_FIELD_USED_BY_ASG);
        // Only launch configs explicitly flagged as unused by the crawler are candidates.
        if (!StringUtils.isNotEmpty(usedByASG) || Boolean.parseBoolean(usedByASG)) {
            return true;
        }
        if (resource.getLaunchTime() == null) {
            LOGGER.error(String.format("The launch config %s has no creation time.", resource.getId()));
            return true;
        }
        DateTime createdAt = new DateTime(resource.getLaunchTime().getTime());
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (now.isBefore(createdAt.plusDays(ageThreshold))) {
            LOGGER.info(String.format("The unused launch config %s has not been created for more than %d days",
                    resource.getId(), ageThreshold));
            return true;
        }
        LOGGER.info(String.format("The unused launch config %s has been created for more than %d days",
                resource.getId(), ageThreshold));
        if (resource.getExpectedTerminationTime() == null) {
            resource.setExpectedTerminationTime(calendar.getBusinessDay(new Date(now.getMillis()), retentionDays));
            resource.setTerminationReason(TERMINATION_REASON);
        }
        return false;
    }
}
| 4,867 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/ASGInstanceValidator.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.Resource;
/**
* The interface is for checking whether an ASG has any active instance.
*/
public interface ASGInstanceValidator {
    /**
     * Checks whether an ASG resource contains any active instances.
     * What counts as "active" is implementation-defined (e.g. registered and up
     * in Discovery/Eureka, or simply listed in the ASG's instance field).
     * @param resource the ASG resource
     * @return true if the ASG contains any active instances, false otherwise.
     */
    boolean hasActiveInstance(Resource resource);
}
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/DiscoveryASGInstanceValidator.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
/**
* The class is for checking whether an ASG has any active instance using Discovery/Eureka.
* If Discovery/Eureka is enabled, it uses its service to check if the instances in the ASG are
* registered and up there.
*/
public class DiscoveryASGInstanceValidator implements ASGInstanceValidator {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(DiscoveryASGInstanceValidator.class);

    /** Client used to query Eureka/Discovery for instance registrations. */
    private final DiscoveryClient discoveryClient;

    /**
     * Constructor.
     * @param discoveryClient
     *            the client to access the Discovery/Eureka service for checking the status of instances.
     */
    public DiscoveryASGInstanceValidator(DiscoveryClient discoveryClient) {
        Validate.notNull(discoveryClient);
        this.discoveryClient = discoveryClient;
    }

    /** {@inheritDoc} */
    @Override
    public boolean hasActiveInstance(Resource resource) {
        String instanceIds = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_INSTANCES);
        String maxSizeStr = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_MAX_SIZE);
        if (StringUtils.isBlank(instanceIds)) {
            // The ASG is empty when it has no instance and the max size of the ASG is 0.
            // If the max size is not 0, the ASG could probably be in the process of starting new instances.
            boolean emptyAndCapped = maxSizeStr != null && Integer.parseInt(maxSizeStr) == 0;
            if (emptyAndCapped) {
                LOGGER.info(String.format("ASG %s is empty.", resource.getId()));
                return false;
            }
            LOGGER.info(String.format("ASG %s does not have instances but the max size is %s",
                    resource.getId(), maxSizeStr));
            return true;
        }
        String[] instances = StringUtils.split(instanceIds, ",");
        LOGGER.debug(String.format("Checking if the %d instances in ASG %s are active.",
                instances.length, resource.getId()));
        for (String instanceId : instances) {
            if (isActiveInstance(instanceId)) {
                LOGGER.info(String.format("ASG %s has active instance.", resource.getId()));
                return true;
            }
        }
        LOGGER.info(String.format("ASG %s has no active instance.", resource.getId()));
        return false;
    }

    /**
     * Returns true if the instance is registered as UP or STARTING in Eureka/Discovery.
     * @param instanceId the instance id
     * @return true if the instance is active, false otherwise
     */
    private boolean isActiveInstance(String instanceId) {
        Validate.notNull(instanceId);
        LOGGER.debug(String.format("Checking if instance %s is active", instanceId));
        for (InstanceInfo info : discoveryClient.getInstancesById(instanceId)) {
            InstanceStatus status = info.getStatus();
            if (status == InstanceStatus.UP || status == InstanceStatus.STARTING) {
                LOGGER.debug(String.format("Instance %s is active in Discovery.", instanceId));
                return true;
            }
        }
        LOGGER.debug(String.format("Instance %s is not active in Discovery.", instanceId));
        return false;
    }
}
| 4,869 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/SuspendedASGRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
* The rule for detecting the ASGs that 1) have old launch configurations and
* 2) do not have any instances or all instances are inactive in Eureka.
* 3) are not fronted with any ELBs.
*/
public class SuspendedASGRule implements Rule {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SuspendedASGRule.class);

    /** Calendar used to compute the business-day termination time. */
    private final MonkeyCalendar calendar;

    /** Days a marked ASG is retained before termination. */
    private final int retentionDays;

    /** Days of ELB suspension before an ASG becomes a cleanup candidate. */
    private final int suspensionAgeThreshold;

    /** Decides whether the ASG still has active instances. */
    private final ASGInstanceValidator instanceValidator;

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param suspensionAgeThreshold
     *            The number of days that the ASG has been suspended from ELB that makes the ASG be
     *            considered a cleanup candidate
     * @param retentionDays
     *            The number of days that the marked ASG is retained before being terminated after
     *            being marked
     * @param instanceValidator
     *            The instance validator to check if an instance is active
     */
    public SuspendedASGRule(MonkeyCalendar calendar, int suspensionAgeThreshold, int retentionDays,
            ASGInstanceValidator instanceValidator) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        Validate.isTrue(suspensionAgeThreshold >= 0);
        Validate.notNull(instanceValidator);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
        this.suspensionAgeThreshold = suspensionAgeThreshold;
        this.instanceValidator = instanceValidator;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!"ASG".equals(resource.getResourceType().name())) {
            return true;
        }
        if (instanceValidator.hasActiveInstance(resource)) {
            return true;
        }
        String suspensionTimeStr = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_SUSPENSION_TIME);
        if (StringUtils.isEmpty(suspensionTimeStr)) {
            LOGGER.info(String.format("ASG %s is not suspended from ELB.", resource.getId()));
            return true;
        }
        DateTime suspendedAt = ASGJanitorCrawler.SUSPENSION_TIME_FORMATTER.parseDateTime(suspensionTimeStr);
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (now.isBefore(suspendedAt.plusDays(suspensionAgeThreshold))) {
            LOGGER.info(String.format("The ASG %s has not been suspended for more than %d days",
                    resource.getId(), suspensionAgeThreshold));
            return true;
        }
        LOGGER.info(String.format("The ASG %s has been suspended for more than %d days",
                resource.getId(), suspensionAgeThreshold));
        if (resource.getExpectedTerminationTime() == null) {
            resource.setExpectedTerminationTime(calendar.getBusinessDay(new Date(now.getMillis()), retentionDays));
            resource.setTerminationReason(String.format(
                    "ASG has been disabled for more than %d days and all instances are out of service in Discovery",
                    suspensionAgeThreshold + retentionDays));
        }
        return false;
    }
}
| 4,870 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/OldEmptyASGRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
import com.netflix.simianarmy.aws.janitor.crawler.edda.EddaASGJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
* The rule for detecting the ASGs that 1) have old launch configurations and
* 2) do not have any instances or all instances are inactive in Eureka.
* 3) are not fronted with any ELBs.
*/
public class OldEmptyASGRule implements Rule {
    // Calendar used both for "now" and for computing the business-day termination date.
    private final MonkeyCalendar calendar;
    // Days a marked ASG is retained before termination.
    private final int retentionDays;
    // Days since the launch config was created before the ASG is considered obsolete.
    private final int launchConfigAgeThreshold;
    // Optional: the ASG must have had no changes for this many days; null disables the check.
    private final Integer lastChangeDaysThreshold;
    // Decides whether the ASG still has active instances (e.g. via Discovery/Eureka).
    private final ASGInstanceValidator instanceValidator;
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OldEmptyASGRule.class);
    /**
     * Constructor without a last-change threshold (the last-change check is skipped).
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the marked ASG is retained before being terminated
     * @param launchConfigAgeThreshold
     *            The number of days that the launch configuration for the ASG has been created that makes the ASG be
     *            considered obsolete
     * @param instanceValidator
     *            The instance validator to check if an instance is active
     */
    public OldEmptyASGRule(MonkeyCalendar calendar, int launchConfigAgeThreshold,
            int retentionDays, ASGInstanceValidator instanceValidator) {
        this(calendar, launchConfigAgeThreshold, null, retentionDays, instanceValidator);
    }
    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the marked ASG is retained before being terminated
     * @param launchConfigAgeThreshold
     *            The number of days that the launch configuration for the ASG has been created that makes the ASG be
     *            considered obsolete
     * @param lastChangeDaysThreshold
     *            The number of days that the launch configuration has not been changed. An ASG is considered as a
     *            cleanup candidate only if it has no change during the last n days. The parameter can be null.
     * @param instanceValidator
     *            The instance validator to check if an instance is active
     */
    public OldEmptyASGRule(MonkeyCalendar calendar, int launchConfigAgeThreshold, Integer lastChangeDaysThreshold,
            int retentionDays, ASGInstanceValidator instanceValidator) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        Validate.isTrue(launchConfigAgeThreshold >= 0);
        Validate.isTrue(lastChangeDaysThreshold == null || lastChangeDaysThreshold >= 0);
        Validate.notNull(instanceValidator);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
        this.launchConfigAgeThreshold = launchConfigAgeThreshold;
        this.lastChangeDaysThreshold = lastChangeDaysThreshold;
        this.instanceValidator = instanceValidator;
    }
    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!"ASG".equals(resource.getResourceType().name())) {
            return true;
        }
        // An ASG fronted by an ELB may still be serving traffic; leave it alone.
        if (StringUtils.isNotEmpty(resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_ELBS))) {
            LOGGER.info(String.format("ASG %s has ELBs.", resource.getId()));
            return true;
        }
        if (instanceValidator.hasActiveInstance(resource)) {
            LOGGER.info(String.format("ASG %s has active instance.", resource.getId()));
            return true;
        }
        String lcName = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_LC_NAME);
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        // An empty ASG with no launch config at all cannot start instances, so it is
        // marked immediately (no age check is possible without a launch config).
        if (StringUtils.isEmpty(lcName)) {
            LOGGER.error(String.format("Failed to find launch configuration for ASG %s", resource.getId()));
            markResource(resource, now);
            return false;
        }
        // A launch config exists but has no creation time: we cannot evaluate its age,
        // so err on the side of keeping the ASG.
        String lcCreationTime = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_LC_CREATION_TIME);
        if (StringUtils.isEmpty(lcCreationTime)) {
            LOGGER.error(String.format("Failed to find creation time for launch configuration %s", lcName));
            return true;
        }
        DateTime createTime = new DateTime(Long.parseLong(lcCreationTime));
        if (now.isBefore(createTime.plusDays(launchConfigAgeThreshold))) {
            LOGGER.info(String.format("The launch configuration %s has not been created for more than %d days",
                    lcName, launchConfigAgeThreshold));
            return true;
        }
        LOGGER.info(String.format("The launch configuration %s has been created for more than %d days",
                lcName, launchConfigAgeThreshold));
        // Optional extra guard: skip ASGs that changed recently even if the launch config is old.
        if (lastChangeDaysThreshold != null) {
            String lastChangeTimeField = resource.getAdditionalField(EddaASGJanitorCrawler.ASG_FIELD_LAST_CHANGE_TIME);
            if (StringUtils.isNotBlank(lastChangeTimeField)) {
                DateTime lastChangeTime = new DateTime(Long.parseLong(lastChangeTimeField));
                if (lastChangeTime.plusDays(lastChangeDaysThreshold).isAfter(now)) {
                    LOGGER.info(String.format("ASG %s had change during the last %d days",
                            resource.getId(), lastChangeDaysThreshold));
                    return true;
                }
            }
        }
        markResource(resource, now);
        return false;
    }
    // Sets the expected termination time and reason if the resource is not already marked.
    private void markResource(Resource resource, DateTime now) {
        if (resource.getExpectedTerminationTime() == null) {
            Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(String.format(
                    "Launch config older than %d days. Not in Discovery. No ELB.",
                    launchConfigAgeThreshold + retentionDays));
        } else {
            LOGGER.info(String.format("Resource %s is already marked as cleanup candidate.", resource.getId()));
        }
    }
}
| 4,871 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/asg/DummyASGInstanceValidator.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.asg;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.janitor.crawler.ASGJanitorCrawler;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A dummy implementation of ASGInstanceValidator that considers every instance as active.
*/
public class DummyASGInstanceValidator implements ASGInstanceValidator {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(DummyASGInstanceValidator.class);

    /** {@inheritDoc} */
    @Override
    public boolean hasActiveInstance(Resource resource) {
        String instanceIds = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_INSTANCES);
        String maxSizeStr = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_MAX_SIZE);
        if (StringUtils.isBlank(instanceIds)) {
            // The ASG is empty when it has no instance and the max size of the ASG is 0.
            // If the max size is not 0, the ASG could probably be in the process of starting new instances.
            boolean maxSizeIsZero = maxSizeStr != null && Integer.parseInt(maxSizeStr) == 0;
            if (!maxSizeIsZero) {
                LOGGER.info(String.format("ASG %s does not have instances but the max size is %s",
                        resource.getId(), maxSizeStr));
                return true;
            }
            LOGGER.info(String.format("ASG %s is empty.", resource.getId()));
            return false;
        }
        // Every listed instance is treated as active. Note StringUtils.split drops empty
        // tokens, so a string of only delimiters (e.g. ",") yields no instances.
        return StringUtils.split(instanceIds, ",").length > 0;
    }
}
| 4,872 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/instance/OrphanedInstanceRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.instance;
import java.util.Date;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.InstanceJanitorCrawler;
import com.netflix.simianarmy.janitor.Rule;
/**
 * The rule for checking the orphaned instances that do not belong to any ASGs and
 * launched for certain days.
 */
public class OrphanedInstanceRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OrphanedInstanceRule.class);

    private static final String TERMINATION_REASON = "No ASG is associated with this instance";
    private static final String ASG_OR_OPSWORKS_TERMINATION_REASON = "No ASG or OpsWorks stack is associated with this instance";

    private final MonkeyCalendar calendar;

    private final int instanceAgeThreshold;

    private final int retentionDaysWithOwner;

    private final int retentionDaysWithoutOwner;

    private final boolean respectOpsWorksParentage;

    /**
     * Constructor for OrphanedInstanceRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param instanceAgeThreshold
     *            The number of days that an instance is considered as orphaned since it is launched
     * @param retentionDaysWithOwner
     *            The number of days that the orphaned instance is retained before being terminated
     *            when the instance has an owner specified
     * @param retentionDaysWithoutOwner
     *            The number of days that the orphaned instance is retained before being terminated
     *            when the instance has no owner specified
     * @param respectOpsWorksParentage
     *            If true, don't consider members of an OpsWorks stack as orphans
     */
    public OrphanedInstanceRule(MonkeyCalendar calendar,
            int instanceAgeThreshold, int retentionDaysWithOwner, int retentionDaysWithoutOwner, boolean respectOpsWorksParentage) {
        Validate.notNull(calendar);
        Validate.isTrue(instanceAgeThreshold >= 0);
        Validate.isTrue(retentionDaysWithOwner >= 0);
        Validate.isTrue(retentionDaysWithoutOwner >= 0);
        this.calendar = calendar;
        this.instanceAgeThreshold = instanceAgeThreshold;
        this.retentionDaysWithOwner = retentionDaysWithOwner;
        this.retentionDaysWithoutOwner = retentionDaysWithoutOwner;
        this.respectOpsWorksParentage = respectOpsWorksParentage;
    }

    /**
     * Constructor for OrphanedInstanceRule that does not treat OpsWorks stack membership
     * as parentage (equivalent to passing false for respectOpsWorksParentage).
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param instanceAgeThreshold
     *            The number of days that an instance is considered as orphaned since it is launched
     * @param retentionDaysWithOwner
     *            The number of days that the orphaned instance is retained before being terminated
     *            when the instance has an owner specified
     * @param retentionDaysWithoutOwner
     *            The number of days that the orphaned instance is retained before being terminated
     *            when the instance has no owner specified
     */
    public OrphanedInstanceRule(MonkeyCalendar calendar,
            int instanceAgeThreshold, int retentionDaysWithOwner, int retentionDaysWithoutOwner) {
        this(calendar, instanceAgeThreshold, retentionDaysWithOwner, retentionDaysWithoutOwner, false);
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!resource.getResourceType().name().equals("INSTANCE")) {
            // The rule is supposed to only work on AWS instances. If a non-instance resource
            // is passed to the rule, the rule simply ignores it and considers it as a valid
            // resource not for cleanup.
            return true;
        }
        AWSResource instanceResource = (AWSResource) resource;
        String awsStatus = instanceResource.getAWSResourceState();
        // Only running instances are orphan candidates. The previous condition
        // (!"running".equals(s) || "pending".equals(s)) contained a dead "pending" clause,
        // since a "pending" status already fails the "running" check; this is the
        // behaviorally equivalent simplification.
        if (!"running".equals(awsStatus)) {
            return true;
        }
        String asgName = instanceResource.getAdditionalField(InstanceJanitorCrawler.INSTANCE_FIELD_ASG_NAME);
        String opsworkStackName = instanceResource.getAdditionalField(InstanceJanitorCrawler.INSTANCE_FIELD_OPSWORKS_STACK_NAME);
        // If there is no ASG AND it isn't an OpsWorks stack (or OpsWorks isn't respected as a parent), we have an orphan
        if (StringUtils.isEmpty(asgName) && (!respectOpsWorksParentage || StringUtils.isEmpty(opsworkStackName))) {
            if (resource.getLaunchTime() == null) {
                // Without a launch time we cannot measure the instance's age; err on keeping it.
                LOGGER.error(String.format("The instance %s has no launch time.", resource.getId()));
                return true;
            } else {
                DateTime launchTime = new DateTime(resource.getLaunchTime().getTime());
                DateTime now = new DateTime(calendar.now().getTimeInMillis());
                if (now.isBefore(launchTime.plusDays(instanceAgeThreshold))) {
                    LOGGER.info(String.format("The orphaned instance %s has not launched for more than %d days",
                            resource.getId(), instanceAgeThreshold));
                    return true;
                }
                LOGGER.info(String.format("The orphaned instance %s has launched for more than %d days",
                        resource.getId(), instanceAgeThreshold));
                if (resource.getExpectedTerminationTime() == null) {
                    // Use the owner-specific retention window when an owner email is set.
                    int retentionDays = retentionDaysWithoutOwner;
                    if (resource.getOwnerEmail() != null) {
                        retentionDays = retentionDaysWithOwner;
                    }
                    Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                    resource.setExpectedTerminationTime(terminationTime);
                    resource.setTerminationReason((respectOpsWorksParentage) ? ASG_OR_OPSWORKS_TERMINATION_REASON : TERMINATION_REASON);
                }
                return false;
            }
        }
        return true;
    }
}
| 4,873 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/generic/TagValueExclusionRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.generic;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.Validate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
/**
 * A rule for excluding resources that contain the provided tags (name and value).
 *
 * If a resource contains the tag and the appropriate value, it will be excluded from any
 * other janitor rules and will not be cleaned.
 *
 */
public class TagValueExclusionRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(TagValueExclusionRule.class);

    /** Tag name to required value; a resource matching any entry is excluded from cleanup. */
    private final Map<String, String> tags;

    /**
     * Constructor for TagValueExclusionRule.
     *
     * @param tags
     *            Set of tags and values to match for exclusion
     */
    public TagValueExclusionRule(Map<String, String> tags) {
        Validate.notNull(tags);
        this.tags = tags;
    }

    /**
     * Constructor for TagValueExclusionRule. Use this constructor to pass names and values as separate args.
     * This is intended for convenience when specifying tag names/values in property files.
     *
     * Each tag[i] = (name[i], value[i])
     *
     * @param names
     *            Set of names to match for exclusion. Size of names must match size of values.
     * @param values
     *            Set of values to match for exclusion. Size of names must match size of values.
     * @throws IllegalArgumentException if either array is null or their lengths differ
     */
    public TagValueExclusionRule(String[] names, String[] values) {
        Validate.notNull(names);
        Validate.notNull(values);
        // Fail fast on mismatched arrays: previously a longer names array triggered an
        // ArrayIndexOutOfBoundsException and a longer values array was silently truncated.
        Validate.isTrue(names.length == values.length,
                "names and values must have the same number of elements");
        tags = new HashMap<String, String>(names.length);
        for (int i = 0; i < names.length; i++) {
            tags.put(names[i], values[i]);
        }
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        for (Map.Entry<String, String> tag : tags.entrySet()) {
            String resourceValue = resource.getTag(tag.getKey());
            if (resourceValue != null && resourceValue.equals(tag.getValue())) {
                LOGGER.debug(String.format("The resource %s has the exclusion tag %s with value %s", resource.getId(), tag.getKey(), resourceValue));
                // A matching exclusion tag makes the resource "valid", i.e. not a cleanup candidate.
                return true;
            }
        }
        return false;
    }
}
| 4,874 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/generic/UntaggedRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.generic;
import java.util.Date;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.janitor.Rule;
/**
 * The rule for marking resources that are missing any of a set of required tags,
 * so they can be cleaned up after the configured retention period.
 */
public class UntaggedRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(UntaggedRule.class);

    private final MonkeyCalendar calendar;

    private final Set<String> tagNames;

    private final int retentionDaysWithOwner;

    private final int retentionDaysWithoutOwner;

    /**
     * Constructor for UntaggedRule.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param tagNames
     *            Set of tags that needs to be set
     * @param retentionDaysWithOwner
     *            The number of days that the resource is retained before being terminated
     *            when the resource has an owner specified
     * @param retentionDaysWithoutOwner
     *            The number of days that the resource is retained before being terminated
     *            when the resource has no owner specified
     */
    public UntaggedRule(MonkeyCalendar calendar, Set<String> tagNames, int retentionDaysWithOwner, int retentionDaysWithoutOwner) {
        Validate.notNull(calendar);
        Validate.notNull(tagNames);
        // Validate like the sibling rules do, failing fast on nonsensical retention periods.
        Validate.isTrue(retentionDaysWithOwner >= 0);
        Validate.isTrue(retentionDaysWithoutOwner >= 0);
        this.calendar = calendar;
        this.tagNames = tagNames;
        this.retentionDaysWithOwner = retentionDaysWithOwner;
        this.retentionDaysWithoutOwner = retentionDaysWithoutOwner;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        for (String tagName : tagNames) {
            // Resource exposes getTag directly; the previous cast to AWSResource was unnecessary.
            if (resource.getTag(tagName) == null) {
                String terminationReason = String.format(" does not have the required tag %s", tagName);
                LOGGER.error(String.format("The resource %s %s", resource.getId(), terminationReason));
                if (resource.getExpectedTerminationTime() == null) {
                    // Use the owner-specific retention window when an owner email is set.
                    int retentionDays = retentionDaysWithoutOwner;
                    if (resource.getOwnerEmail() != null) {
                        retentionDays = retentionDaysWithOwner;
                    }
                    DateTime now = new DateTime(calendar.now().getTimeInMillis());
                    Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                    resource.setExpectedTerminationTime(terminationTime);
                    resource.setTerminationReason(terminationReason);
                }
                return false;
            } else {
                LOGGER.debug(String.format("The resource %s has the required tag %s", resource.getId(), tagName));
            }
        }
        LOGGER.info(String.format("The resource %s has all required tags", resource.getId()));
        return true;
    }
}
| 4,875 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/volume/DeleteOnTerminationRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.volume;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.crawler.edda.EddaEBSVolumeJanitorCrawler;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import com.netflix.simianarmy.janitor.Rule;
import org.apache.commons.lang.Validate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
/**
 * The rule is for checking whether an EBS volume is not attached to any instance and had the
 * DeleteOnTermination flag set in the previous attachment. This is an error case that AWS didn't
 * handle. The volume should have been deleted as soon as it was detached.
 *
 * NOTE: since the information came from the history, the rule will work only if Edda is enabled
 * for Janitor Monkey.
 */
public class DeleteOnTerminationRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(DeleteOnTerminationRule.class);

    /** The date format used to print or parse the user specified termination date. **/
    private static final DateTimeFormatter TERMINATION_DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd");

    /**
     * The termination reason for the DeleteOnTerminationRule.
     */
    public static final String TERMINATION_REASON = "Not attached and DeleteOnTerminate flag was set";

    private final MonkeyCalendar calendar;

    private final int retentionDays;

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param retentionDays
     *            The number of days that the volume is retained before being terminated after being marked
     *            as cleanup candidate
     */
    public DeleteOnTerminationRule(MonkeyCalendar calendar, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.retentionDays = retentionDays;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        // Only detached EBS volumes are of interest; anything else is valid by definition.
        if (!resource.getResourceType().name().equals("EBS_VOLUME")) {
            return true;
        }
        // The state of the volume being "available" means that it is not attached to any instance.
        boolean detached = "available".equals(((AWSResource) resource).getAWSResourceState());
        if (!detached) {
            return true;
        }
        String janitorTag = resource.getTag(JanitorMonkey.JANITOR_TAG);
        if (janitorTag != null) {
            if ("donotmark".equals(janitorTag)) {
                LOGGER.info(String.format("The volume %s is tagged as not handled by Janitor",
                        resource.getId()));
                return true;
            }
            try {
                // Owners can tag the volume with a termination date in the "janitor" tag.
                Date userSpecifiedDate = new Date(
                        TERMINATION_DATE_FORMATTER.parseDateTime(janitorTag).getMillis());
                resource.setExpectedTerminationTime(userSpecifiedDate);
                resource.setTerminationReason(String.format("User specified termination date %s", janitorTag));
                return false;
            } catch (Exception e) {
                // The tag is not a date; fall through to the DeleteOnTermination check below.
                LOGGER.error(String.format("The janitor tag is not a user specified date: %s", janitorTag));
            }
        }
        String deleteOnTermination = resource.getAdditionalField(EddaEBSVolumeJanitorCrawler.DELETE_ON_TERMINATION);
        if (!"true".equals(deleteOnTermination)) {
            return true;
        }
        if (resource.getExpectedTerminationTime() == null) {
            Date terminationTime = calendar.getBusinessDay(calendar.now().getTime(), retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(TERMINATION_REASON);
            LOGGER.info(String.format(
                    "Volume %s is marked to be cleaned at %s as it is detached and DeleteOnTermination was set",
                    resource.getId(), resource.getExpectedTerminationTime()));
        } else {
            LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
        }
        return false;
    }
}
| 4,876 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/aws/janitor/rule/volume/OldDetachedVolumeRule.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.aws.janitor.rule.volume;
import java.util.Date;
import java.util.Map;
import org.apache.commons.lang.Validate;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyCalendar;
import com.netflix.simianarmy.Resource;
import com.netflix.simianarmy.aws.AWSResource;
import com.netflix.simianarmy.aws.janitor.VolumeTaggingMonkey;
import com.netflix.simianarmy.janitor.JanitorMonkey;
import com.netflix.simianarmy.janitor.Rule;
/**
 * The rule is for checking whether an EBS volume is detached for more than
 * certain days. The rule mostly relies on tags on the volume to decide if
 * the volume should be marked.
 */
public class OldDetachedVolumeRule implements Rule {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(OldDetachedVolumeRule.class);

    private final MonkeyCalendar calendar;

    private final int detachDaysThreshold;

    private final int retentionDays;

    /** The date format used to print or parse the user specified termination date. **/
    public static final DateTimeFormatter TERMINATION_DATE_FORMATTER = DateTimeFormat.forPattern("yyyy-MM-dd");

    /**
     * Constructor.
     *
     * @param calendar
     *            The calendar used to calculate the termination time
     * @param detachDaysThreshold
     *            The number of days that a volume is considered as cleanup candidate since it is detached
     * @param retentionDays
     *            The number of days that the volume is retained before being terminated after being marked
     *            as cleanup candidate
     */
    public OldDetachedVolumeRule(MonkeyCalendar calendar, int detachDaysThreshold, int retentionDays) {
        Validate.notNull(calendar);
        Validate.isTrue(detachDaysThreshold >= 0);
        Validate.isTrue(retentionDays >= 0);
        this.calendar = calendar;
        this.detachDaysThreshold = detachDaysThreshold;
        this.retentionDays = retentionDays;
    }

    /** {@inheritDoc} */
    @Override
    public boolean isValid(Resource resource) {
        Validate.notNull(resource);
        if (!resource.getResourceType().name().equals("EBS_VOLUME")) {
            // The rule only applies to EBS volumes; everything else is valid by definition.
            return true;
        }
        // "available" means the volume is not attached to any instance.
        if (!"available".equals(((AWSResource) resource).getAWSResourceState())) {
            return true;
        }
        String janitorTag = resource.getTag(JanitorMonkey.JANITOR_TAG);
        if (janitorTag != null) {
            if ("donotmark".equals(janitorTag)) {
                LOGGER.info(String.format("The volume %s is tagged as not handled by Janitor",
                        resource.getId()));
                return true;
            }
            try {
                // Owners can tag the volume with a termination date in the "janitor" tag.
                Date userSpecifiedDate = new Date(
                        TERMINATION_DATE_FORMATTER.parseDateTime(janitorTag).getMillis());
                resource.setExpectedTerminationTime(userSpecifiedDate);
                resource.setTerminationReason(String.format("User specified termination date %s", janitorTag));
                return false;
            } catch (Exception e) {
                // The tag is not a date; fall through and evaluate the meta tag instead.
                LOGGER.error(String.format("The janitor tag is not a user specified date: %s", janitorTag));
            }
        }
        String janitorMetaTag = resource.getTag(JanitorMonkey.JANITOR_META_TAG);
        if (janitorMetaTag == null) {
            LOGGER.info(String.format("Volume %s is not tagged with the Janitor meta information, ignore.",
                    resource.getId()));
            return true;
        }
        Map<String, String> metadata = VolumeTaggingMonkey.parseJanitorMetaTag(janitorMetaTag);
        String detachTimeTag = metadata.get(JanitorMonkey.DETACH_TIME_TAG_KEY);
        if (detachTimeTag == null) {
            return true;
        }
        DateTime detachTime;
        try {
            detachTime = AWSResource.DATE_FORMATTER.parseDateTime(detachTimeTag);
        } catch (Exception e) {
            // BUG FIX: previously this logged the parsed value (always null on failure)
            // instead of the raw tag value that failed to parse.
            LOGGER.error(String.format("Detach time in the JANITOR_META tag of %s is not in the valid format: %s",
                    resource.getId(), detachTimeTag));
            return true;
        }
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        // detachTime is guaranteed non-null here because a parse failure returns above.
        if (detachTime.plusDays(detachDaysThreshold).isBefore(now)) {
            if (resource.getExpectedTerminationTime() == null) {
                Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
                resource.setExpectedTerminationTime(terminationTime);
                resource.setTerminationReason(String.format("Volume not attached for %d days",
                        detachDaysThreshold + retentionDays));
                LOGGER.info(String.format(
                        "Volume %s is marked to be cleaned at %s as it is detached for more than %d days",
                        resource.getId(), resource.getExpectedTerminationTime(), detachDaysThreshold));
            } else {
                LOGGER.info(String.format("Resource %s is already marked.", resource.getId()));
            }
            return false;
        }
        return true;
    }
}
| 4,877 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/FailEc2ChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Adds entries to /etc/hosts so that EC2 API endpoints are unreachable.
 */
public class FailEc2ChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param monkeyConfig
     *            Configuration to use
     */
    public FailEc2ChaosType(MonkeyConfiguration monkeyConfig) {
        // "FailEc2" is the key handed to ScriptChaosType for this strategy.
        super(monkeyConfig, "FailEc2");
    }
}
| 4,878 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/BlockAllNetworkTrafficChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.netflix.simianarmy.CloudClient;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Blocks network traffic to/from instance, so it is running but offline.
 *
 * We actually put the instance into a different security group. First, because AWS requires a SG for some reason.
 * Second, because you might well want to continue to allow e.g. SSH inbound.
 */
public class BlockAllNetworkTrafficChaosType extends ChaosType {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BlockAllNetworkTrafficChaosType.class);

    /** Name of the security group instances are moved into to cut their traffic. */
    private final String blockedSecurityGroupName;

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public BlockAllNetworkTrafficChaosType(MonkeyConfiguration config) {
        super(config, "BlockAllNetworkTraffic");
        blockedSecurityGroupName = config.getStrOrElse(getConfigurationPrefix() + "group", "blocked-network");
    }

    /**
     * We can apply the strategy iff the blocked security group is configured.
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        if (!instance.getCloudClient().canChangeInstanceSecurityGroups(instance.getInstanceId())) {
            LOGGER.info("Not a VPC instance, can't change security groups");
            return false;
        }
        return super.canApply(instance);
    }

    /**
     * Takes the instance off the network.
     */
    @Override
    public void apply(ChaosInstance instance) {
        CloudClient client = instance.getCloudClient();
        String targetInstanceId = instance.getInstanceId();
        if (!client.canChangeInstanceSecurityGroups(targetInstanceId)) {
            throw new IllegalStateException("canApply should have returned false");
        }
        String groupId = client.findSecurityGroup(targetInstanceId, blockedSecurityGroupName);
        if (groupId == null) {
            // Lazily create the blocking group the first time it is needed.
            LOGGER.info("Auto-creating security group {}", blockedSecurityGroupName);
            String description = "Empty security group for blocked instances";
            groupId = client.createSecurityGroup(targetInstanceId, blockedSecurityGroupName, description);
        }
        LOGGER.info("Blocking network traffic by applying security group {} to instance {}", groupId, targetInstanceId);
        List<String> replacementGroups = Lists.newArrayList(groupId);
        client.setInstanceSecurityGroups(targetInstanceId, replacementGroups);
    }
}
| 4,879 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/SshConfig.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.io.File;
import java.io.IOException;
import org.jclouds.domain.LoginCredentials;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.base.Strings;
import com.google.common.io.Files;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Holds SSH connection info, used for script-based chaos types.
 */
public class SshConfig {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(SshConfig.class);

    /**
     * The SSH credentials to log on to an instance.
     */
    private final LoginCredentials sshCredentials;

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public SshConfig(MonkeyConfiguration config) {
        String sshUser = config.getStrOrElse("simianarmy.chaos.ssh.user", "root");
        String sshKeyPath = config.getStrOrElse("simianarmy.chaos.ssh.key", null);
        String privateKey = (sshKeyPath == null) ? null : readPrivateKey(sshKeyPath.trim());
        if (privateKey == null) {
            this.sshCredentials = LoginCredentials.builder().user(sshUser).build();
        } else {
            this.sshCredentials = LoginCredentials.builder().user(sshUser).privateKey(privateKey).build();
        }
    }

    /**
     * Reads the private key file at the given path, expanding a leading "~/" to the
     * user's home directory.
     *
     * @param sshKeyPath path to the key file
     * @return contents of the key file
     */
    private static String readPrivateKey(String sshKeyPath) {
        if (sshKeyPath.startsWith("~/")) {
            String home = System.getProperty("user.home");
            if (!Strings.isNullOrEmpty(home)) {
                if (!home.endsWith("/")) {
                    home += "/";
                }
                sshKeyPath = home + sshKeyPath.substring(2);
            }
        }
        LOGGER.debug("Reading SSH key from {}", sshKeyPath);
        try {
            return Files.toString(new File(sshKeyPath), Charsets.UTF_8);
        } catch (IOException e) {
            throw new IllegalStateException("Unable to read the specified SSH key: " + sshKeyPath, e);
        }
    }

    /**
     * Get the configured SSH credentials.
     *
     * @return configured SSH credentials
     */
    public LoginCredentials getCredentials() {
        return sshCredentials;
    }

    /**
     * Check if ssh is configured.
     *
     * NOTE(review): as written, the constructor always builds a non-null LoginCredentials
     * (with or without a private key), so this currently always returns true — confirm
     * whether callers expect "enabled" to mean "a private key was configured".
     *
     * @return true if credentials are configured
     */
    public boolean isEnabled() {
        return sshCredentials != null;
    }
}
| 4,880 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ChaosInstance.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import org.jclouds.domain.LoginCredentials;
import org.jclouds.ssh.SshClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.CloudClient;
/**
 * Wrapper around an instance on which we are going to cause chaos.
 */
public class ChaosInstance {

    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ChaosInstance.class);

    private final CloudClient cloudClient;

    private final String instanceId;

    private final SshConfig sshConfig;

    /**
     * Cached result of the SSH connectivity probe; null until the first probe runs.
     */
    private Boolean canConnectSsh = null;

    /**
     * Constructor.
     *
     * @param cloudClient
     *            client for cloud access
     * @param instanceId
     *            id of instance on cloud
     * @param sshConfig
     *            SSH configuration to access instance
     */
    public ChaosInstance(CloudClient cloudClient, String instanceId, SshConfig sshConfig) {
        this.cloudClient = cloudClient;
        this.instanceId = instanceId;
        this.sshConfig = sshConfig;
    }

    /**
     * Gets the {@link SshConfig} used to SSH to the instance.
     *
     * @return the {@link SshConfig}
     */
    public SshConfig getSshConfig() {
        return sshConfig;
    }

    /**
     * Gets the {@link CloudClient} used to access the cloud.
     *
     * @return the {@link CloudClient}
     */
    public CloudClient getCloudClient() {
        return cloudClient;
    }

    /**
     * Returns the instance id to identify the instance to the cloud client.
     *
     * @return instance id
     */
    public String getInstanceId() {
        return instanceId;
    }

    /**
     * Check if the SSH credentials are working.
     *
     * This is cached for the duration of this object.
     *
     * NOTE(review): the {@code instance} parameter is unused — the probe always runs
     * against this object's own instance; it is kept for interface compatibility.
     *
     * @return true iff ssh is configured and able to log on to instance.
     */
    public boolean canConnectSsh(ChaosInstance instance) {
        if (!sshConfig.isEnabled()) {
            return false;
        }
        if (canConnectSsh == null) {
            try {
                // It would be nicer to keep this connection open, but then we'd have to be closed.
                SshClient probe = connectSsh();
                probe.disconnect();
                canConnectSsh = Boolean.TRUE;
            } catch (Exception e) {
                LOGGER.warn("Error making SSH connection to instance", e);
                canConnectSsh = Boolean.FALSE;
            }
        }
        return canConnectSsh;
    }

    /**
     * Connect to the instance over SSH.
     *
     * @return {@link SshClient} for connection
     */
    public SshClient connectSsh() {
        if (!sshConfig.isEnabled()) {
            throw new IllegalStateException();
        }
        return cloudClient.connectSsh(instanceId, sshConfig.getCredentials());
    }
}
| 4,881 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ChaosEmailNotifier.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClient;
import com.netflix.simianarmy.aws.AWSEmailNotifier;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
/**
 * The email notifier for Chaos monkey.
 *
 * Concrete subclasses implement the two termination notifications declared below.
 */
public abstract class ChaosEmailNotifier extends AWSEmailNotifier {
    /**
     * Constructor. Currently the notifier is tied to Amazon Simple Email
     * Service as its email client. We can release this restriction when we
     * want to support different email clients.
     *
     * @param sesClient the AWS simple email service client.
     */
    public ChaosEmailNotifier(AmazonSimpleEmailServiceClient sesClient) {
        super(sesClient);
    }

    /**
     * Sends an email notification for a termination of instance to group
     * owner's email address.
     *
     * @param group the instance group
     * @param instance the instance id
     * @param chaosType the chosen chaos strategy
     */
    public abstract void sendTerminationNotification(InstanceGroup group, String instance, ChaosType chaosType);

    /**
     * Sends an email notification for a termination of instance to a global
     * email address.
     *
     * @param group the instance group
     * @param instance the instance id
     * @param chaosType the chosen chaos strategy
     */
    public abstract void sendTerminationGlobalNotification(InstanceGroup group, String instance, ChaosType chaosType);
}
| 4,882 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/BurnCpuChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Executes a CPU intensive program on the node, using up all available CPU.
 *
 * This simulates either a noisy CPU neighbor on the box or just a general issue with the CPU.
 */
public class BurnCpuChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public BurnCpuChaosType(MonkeyConfiguration config) {
        // Runs /scripts/burncpu.sh on the instance over SSH (see ScriptChaosType).
        super(config, "BurnCpu");
    }
}
| 4,883 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/DetachVolumesChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.CloudClient;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.basic.chaos.BasicChaosMonkey;
/**
 * We force-detach all the EBS volumes.
 *
 * This is supposed to simulate a catastrophic failure of EBS, however the instance will (possibly) still keep running;
 * e.g. it should continue to respond to pings.
 */
public class DetachVolumesChaosType extends ChaosType {
    /**
     * The Constant LOGGER.
     *
     * Fix: this logger was previously created with BasicChaosMonkey.class (a
     * copy-paste error), so log lines from this class were misattributed to
     * the monkey's logger category.
     */
    private static final Logger LOGGER = LoggerFactory.getLogger(DetachVolumesChaosType.class);

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public DetachVolumesChaosType(MonkeyConfiguration config) {
        super(config, "DetachVolumes");
    }

    /**
     * Strategy can be applied iff there are any non-root EBS volumes attached.
     *
     * @param instance
     *            the candidate instance
     * @return true when at least one non-root volume is attached and the base checks pass
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        CloudClient cloudClient = instance.getCloudClient();
        String instanceId = instance.getInstanceId();

        List<String> volumes = cloudClient.listAttachedVolumes(instanceId, false);
        if (volumes.isEmpty()) {
            LOGGER.debug("Can't apply strategy: no non-root EBS volumes");
            return false;
        }

        return super.canApply(instance);
    }

    /**
     * Force-detaches all attached non-root EBS volumes from the instance.
     *
     * @param instance
     *            the victim instance
     */
    @Override
    public void apply(ChaosInstance instance) {
        CloudClient cloudClient = instance.getCloudClient();
        String instanceId = instance.getInstanceId();

        // IDEA: We could have a strategy where we detach some of the volumes...
        boolean force = true;
        for (String volumeId : cloudClient.listAttachedVolumes(instanceId, false)) {
            cloudClient.detachVolume(instanceId, volumeId, force);
        }
    }
}
| 4,884 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/NullRouteChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Null routes the network, taking a node going offline.
 *
 * Currently we offline 10.x.x.x (the AWS private network range).
 *
 * I think the machine will still be publicly accessible, but won't be able to communicate with any other nodes on
 * the EC2 network.
 */
public class NullRouteChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public NullRouteChaosType(MonkeyConfiguration config) {
        // Runs /scripts/nullroute.sh on the instance over SSH (see ScriptChaosType).
        super(config, "NullRoute");
    }
}
| 4,885 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/NetworkLatencyChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Introduces network latency using traffic-shaping.
 */
public class NetworkLatencyChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public NetworkLatencyChaosType(MonkeyConfiguration config) {
        // Runs /scripts/networklatency.sh on the instance over SSH (see ScriptChaosType).
        super(config, "NetworkLatency");
    }
}
| 4,886 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ScriptChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.io.IOException;
import java.net.URL;
import org.jclouds.compute.domain.ExecResponse;
import org.jclouds.ssh.SshClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.io.Resources;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Base class for chaos types that run a script over JClouds/SSH on the node.
 *
 * Subclasses supply a key; the classpath resource /scripts/&lt;key-lowercase&gt;.sh
 * is uploaded to /tmp on the instance and executed with bash.
 */
public abstract class ScriptChaosType extends ChaosType {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ScriptChaosType.class);

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     * @param key
     *            Key for the chaos monkey strategy
     */
    public ScriptChaosType(MonkeyConfiguration config, String key) {
        super(config, key);
    }

    /**
     * We can apply the strategy iff we can SSH to the instance.
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        if (!instance.getSshConfig().isEnabled()) {
            LOGGER.info("Strategy disabled because SSH credentials not set");
            return false;
        }

        if (!instance.canConnectSsh(instance)) {
            LOGGER.warn("Strategy disabled because SSH credentials failed");
            return false;
        }

        return super.canApply(instance);
    }

    /**
     * Runs the script.
     *
     * Fixes over the previous version:
     * - the SSH connection is closed in a finally block, so it is no longer
     *   leaked when the upload or the remote execution throws;
     * - the script resource is read before connecting, so a missing resource
     *   no longer opens (and abandons) a connection.
     */
    @Override
    public void apply(ChaosInstance instance) {
        LOGGER.info("Running script for {} on instance {}", getKey(), instance.getInstanceId());

        // NOTE(review): toLowerCase() uses the JVM default locale; keys are
        // expected to be ASCII so this is harmless, but Locale.ROOT would be safer.
        String filename = getKey().toLowerCase() + ".sh";
        URL url = Resources.getResource(ScriptChaosType.class, "/scripts/" + filename);
        String script;
        try {
            script = Resources.toString(url, Charsets.UTF_8);
        } catch (IOException e) {
            throw new IllegalStateException("Error reading script resource", e);
        }

        SshClient ssh = instance.connectSsh();
        try {
            ssh.put("/tmp/" + filename, script);

            ExecResponse response = ssh.exec("/bin/bash /tmp/" + filename);
            if (response.getExitStatus() != 0) {
                // Best-effort chaos: log and continue rather than fail the run.
                LOGGER.warn("Got non-zero output from running script: {}", response);
            }
        } finally {
            ssh.disconnect();
        }
    }
}
| 4,887 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/KillProcessesChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Kills processes on the node.
 *
 * This simulates the process crashing (for any reason).
 */
public class KillProcessesChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public KillProcessesChaosType(MonkeyConfiguration config) {
        // Runs /scripts/killprocesses.sh on the instance over SSH (see ScriptChaosType).
        super(config, "KillProcesses");
    }
}
| 4,888 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ShutdownInstanceChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.CloudClient;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Shuts down the instance using the cloud instance-termination API.
 *
 * This is the classic chaos-monkey strategy.
 */
public class ShutdownInstanceChaosType extends ChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public ShutdownInstanceChaosType(MonkeyConfiguration config) {
        super(config, "ShutdownInstance");
    }

    /**
     * Terminates the instance through its cloud client.
     */
    @Override
    public void apply(ChaosInstance instance) {
        instance.getCloudClient().terminateInstance(instance.getInstanceId());
    }

    /**
     * The classic termination strategy defaults to enabled, unlike the other strategies.
     */
    @Override
    protected boolean getEnabledDefault() {
        return true;
    }
}
| 4,889 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/FailDnsChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Blocks TCP and UDP port 53, so DNS resolution fails.
 */
public class FailDnsChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public FailDnsChaosType(MonkeyConfiguration config) {
        // Runs /scripts/faildns.sh on the instance over SSH (see ScriptChaosType).
        super(config, "FailDns");
    }
}
| 4,890 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/FailDynamoDbChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Adds entries to /etc/hosts so that DynamoDB API endpoints are unreachable.
 */
public class FailDynamoDbChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public FailDynamoDbChaosType(MonkeyConfiguration config) {
        // Runs /scripts/faildynamodb.sh on the instance over SSH (see ScriptChaosType).
        super(config, "FailDynamoDb");
    }
}
| 4,891 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/FillDiskChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Creates a huge file on the root device so that the disk fills up.
 */
public class FillDiskChaosType extends ScriptChaosType {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(FillDiskChaosType.class);

    /*
     * Enhancement: As with BurnIoChaosType, it would be nice to randomize the volume.
     *
     * coryb suggested this, and proposed this script:
     *
     * nohup dd if=/dev/urandom of=/burn bs=1M count=$(df -ml /burn | awk '/\//{print $2}') iflag=fullblock &
     */

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public FillDiskChaosType(MonkeyConfiguration config) {
        super(config, "FillDisk");
    }

    /**
     * Applicable only when the superclass checks pass (SSH reachable) and
     * filling the disk would not cost money on an EBS root volume, unless
     * burning money is explicitly enabled.
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        if (super.canApply(instance)) {
            boolean costsMoney = isRootVolumeEbs(instance) && !isBurnMoneyEnabled();
            if (!costsMoney) {
                return true;
            }
            LOGGER.debug("Root volume is EBS so FillDisk would cost money; skipping");
        }
        return false;
    }
}
| 4,892 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ChaosMonkey.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.util.Date;
import java.util.List;
import com.netflix.simianarmy.EventType;
import com.netflix.simianarmy.FeatureNotEnabledException;
import com.netflix.simianarmy.InstanceGroupNotFoundException;
import com.netflix.simianarmy.Monkey;
import com.netflix.simianarmy.MonkeyConfiguration;
import com.netflix.simianarmy.MonkeyRecorder.Event;
import com.netflix.simianarmy.MonkeyType;
/**
 * The Class ChaosMonkey.
 *
 * Abstract base for the Chaos monkey: implementations crawl instance groups,
 * select victims, apply a {@link ChaosType}, and record/notify terminations.
 */
public abstract class ChaosMonkey extends Monkey {
    /**
     * The Interface Context. Supplies the collaborators a ChaosMonkey implementation needs.
     */
    public interface Context extends Monkey.Context {
        /**
         * Configuration.
         *
         * @return the monkey configuration
         */
        MonkeyConfiguration configuration();

        /**
         * Chaos crawler.
         *
         * @return the chaos crawler
         */
        ChaosCrawler chaosCrawler();

        /**
         * Chaos instance selector.
         *
         * @return the chaos instance selector
         */
        ChaosInstanceSelector chaosInstanceSelector();

        /**
         * Chaos email notifier.
         *
         * @return the chaos email notifier
         */
        ChaosEmailNotifier chaosEmailNotifier();
    }

    /** The context. */
    private final Context ctx;

    /**
     * Instantiates a new chaos monkey.
     *
     * @param ctx
     *            the context.
     */
    public ChaosMonkey(Context ctx) {
        super(ctx);
        this.ctx = ctx;
    }

    /**
     * The monkey Type.
     */
    public enum Type implements MonkeyType {
        /** chaos monkey. */
        CHAOS
    }

    /**
     * The event types that this monkey causes.
     */
    public enum EventTypes implements EventType {
        /** A termination, or a termination that was skipped. */
        CHAOS_TERMINATION, CHAOS_TERMINATION_SKIPPED
    }

    /** {@inheritDoc} */
    @Override
    public final Type type() {
        return Type.CHAOS;
    }

    /** {@inheritDoc} */
    @Override
    public Context context() {
        return ctx;
    }

    /** {@inheritDoc} */
    @Override
    public abstract void doMonkeyBusiness();

    /**
     * Gets the count of terminations since a specific time. Chaos should probably not continue to beat up an instance
     * group if the count exceeds a threshold.
     *
     * @param group
     *            the group
     * @param after
     *            only terminations after this time are counted
     * @return the number of terminations recorded for the group since {@code after}
     */
    public abstract int getPreviousTerminationCount(ChaosCrawler.InstanceGroup group, Date after);

    /**
     * Record termination. This is used to notify system owners of terminations and to record terminations so that Chaos
     * does not continue to thrash the instance groups on later runs.
     *
     * @param group
     *            the group
     * @param instance
     *            the instance
     * @param chaosType
     *            the chaos strategy that was applied
     * @return the termination event
     */
    public abstract Event recordTermination(ChaosCrawler.InstanceGroup group, String instance, ChaosType chaosType);

    /**
     * Terminates one instance right away from an instance group when there are available instances.
     *
     * @param type
     *            the type of the instance group
     * @param name
     *            the name of the instance group
     * @param chaosType
     *            the chaos strategy to apply
     * @return the termination event
     * @throws FeatureNotEnabledException
     *             if the feature is not enabled
     * @throws InstanceGroupNotFoundException
     *             if no instance group matches the given type and name
     */
    public abstract Event terminateNow(String type, String name, ChaosType chaosType)
            throws FeatureNotEnabledException, InstanceGroupNotFoundException;

    /**
     * Sends notification for the termination to the instance owners.
     *
     * @param group
     *            the group
     * @param instance
     *            the instance
     * @param chaosType
     *            the chaos monkey strategy that was chosen
     */
    public abstract void sendTerminationNotification(ChaosCrawler.InstanceGroup group, String instance,
            ChaosType chaosType);

    /**
     * Gets a list of all enabled chaos types for this ChaosMonkey.
     *
     * @return the enabled {@link ChaosType} strategies
     */
    public abstract List<ChaosType> getChaosTypes();
}
| 4,893 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/NetworkCorruptionChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Introduces network packet corruption using traffic-shaping.
 */
public class NetworkCorruptionChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public NetworkCorruptionChaosType(MonkeyConfiguration config) {
        // Runs /scripts/networkcorruption.sh on the instance over SSH (see ScriptChaosType).
        super(config, "NetworkCorruption");
    }
}
| 4,894 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ChaosInstanceSelector.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.chaos.ChaosCrawler.InstanceGroup;
import java.util.Collection;
/**
 * The Interface ChaosInstanceSelector.
 */
public interface ChaosInstanceSelector {
    /**
     * Select. Pick random instances out of the group with provided probability. Chaos will draw a random number and if
     * that random number is lower than probability then it will proceed to select an instance (at random) out of the
     * group. If the random number is higher than the provided probability then no instance will be selected and
     * <b>null</b> will be returned.
     *
     * When the probability value is bigger than 1, say N + 0.x, it first applies the algorithm described above
     * with the probability value 0.x to select possibly one instance, then it randomly picks N more instances.
     *
     * The probability is the run probability. If Chaos is running hourly between 9am and 3pm with an overall configured
     * probability of "1.0" then the probability provided to this routine would be 1.0/6 (6 hours in 9am-3pm). So the
     * typical probability here would be .1666. For Chaos to select an instance it will pick a random number between 0
     * and 1. If that random number is less than the .1666 it will proceed to select an instance and return it,
     * otherwise it will return null. Over 6 runs it is likely that the random number will be less than .1666, but it is
     * not certain.
     *
     * To make Chaos select an instance with 100% certainty it would have to be configured to run only once a day and
     * the instance group would have to be configured for "1.0" daily probability.
     *
     * @param group
     *            the group
     * @param probability
     *            the probability per run that an instance should be terminated.
     * @return the ids of the selected instances (see above for the no-selection case)
     */
    Collection<String> select(InstanceGroup group, double probability);
}
| 4,895 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/NetworkLossChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Introduces network packet loss using traffic-shaping.
 */
public class NetworkLossChaosType extends ScriptChaosType {
    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public NetworkLossChaosType(MonkeyConfiguration config) {
        // Runs /scripts/networkloss.sh on the instance over SSH (see ScriptChaosType).
        super(config, "NetworkLoss");
    }
}
| 4,896 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ChaosCrawler.java | /*
*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.util.EnumSet;
import java.util.List;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.netflix.simianarmy.GroupType;
/**
 * The Interface ChaosCrawler.
 *
 * Discovers the instance groups that Chaos monkey may pick victims from.
 */
public interface ChaosCrawler {
    /**
     * The Interface InstanceGroup. A named, typed collection of instance ids in one region.
     */
    public interface InstanceGroup {
        /**
         * Type.
         *
         * @return the group type enum
         */
        GroupType type();

        /**
         * Name.
         *
         * @return the group name
         */
        String name();

        /**
         * Region.
         *
         * @return the region the group exists in
         */
        String region();

        /**
         * Tags.
         *
         * @return the list of tags associated with group type
         */
        List<TagDescription> tags();

        /**
         * Instances.
         *
         * @return the list of instances
         */
        List<String> instances();

        /**
         * Adds the instance.
         *
         * @param instance
         *            the instance
         */
        void addInstance(String instance);

        /**
         * Copies the Instance group replacing its name with
         * the supplied name.
         *
         * @param name
         *            the name to give the copied group
         * @return the new instance group
         */
        InstanceGroup copyAs(String name);
    }

    /**
     * Group types.
     *
     * @return the set of group types this crawler creates
     */
    EnumSet<?> groupTypes();

    /**
     * Groups.
     *
     * @return the list of all crawled instance groups
     */
    List<InstanceGroup> groups();

    /**
     * Gets the up to date information for a collection of group names.
     *
     * @param names
     *            the group names
     * @return the list of instance groups
     */
    List<InstanceGroup> groups(String... names);
}
| 4,897 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/BurnIoChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * Executes a disk I/O intensive program on the node, reducing I/O capacity.
 *
 * This simulates either a noisy neighbor on the box or just a general issue with the disk.
 */
public class BurnIoChaosType extends ScriptChaosType {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(BurnIoChaosType.class);

    /*
     * Enhancement: It would be nice to target other devices than the root disk.
     *
     * Considerations:
     * 1) EBS activity costs money.
     * 2) The root may be on EBS anyway.
     * 3) If it's costing money, we might want to stop after a while to stop runaway charges.
     *
     * coryb suggested this, and proposed something like this:
     *
     * tmp=$(mktemp)
     * df -hl -x tmpfs | awk '/\//{print $6}' > $tmp
     * mount=$(sed -n $((RANDOM%$(wc -l < $tmp)+1))p $tmp)
     * rm $tmp
     *
     * And then of=$mount/burn
     *
     * An alternative might be to run df over SSH, parse it here, and then pass the desired
     * path to the script. This keeps the script simpler. I don't think there's an easy way
     * to tell the difference between an EBS volume and an instance volume other than from the
     * EC2 API.
     */

    /**
     * Constructor.
     *
     * @param config
     *            Configuration to use
     */
    public BurnIoChaosType(MonkeyConfiguration config) {
        super(config, "BurnIO");
    }

    /**
     * Applicable only when the superclass checks pass (SSH reachable) and
     * burning I/O would not cost money on an EBS root volume, unless burning
     * money is explicitly enabled.
     */
    @Override
    public boolean canApply(ChaosInstance instance) {
        if (super.canApply(instance)) {
            boolean costsMoney = isRootVolumeEbs(instance) && !isBurnMoneyEnabled();
            if (!costsMoney) {
                return true;
            }
            LOGGER.debug("Root volume is EBS so BurnIO would cost money; skipping");
        }
        return false;
    }
}
| 4,898 |
0 | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy | Create_ds/SimianArmy/src/main/java/com/netflix/simianarmy/chaos/ChaosType.java | /*
*
* Copyright 2013 Justin Santa Barbara.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.simianarmy.chaos;
import java.util.List;
import java.util.Locale;
import java.util.Objects;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.netflix.simianarmy.CloudClient;
import com.netflix.simianarmy.MonkeyConfiguration;
/**
 * A strategy pattern for different types of chaos the chaos monkey can cause.
 *
 * <p>Concrete subclasses implement {@link #apply(ChaosInstance)} and may refine
 * {@link #canApply(ChaosInstance)} to veto instances they cannot act upon.
 */
public abstract class ChaosType {
    /** The Constant LOGGER. */
    private static final Logger LOGGER = LoggerFactory.getLogger(ChaosType.class);

    /**
     * Configuration for this chaos type.
     */
    private final MonkeyConfiguration config;

    /**
     * The unique key for the ChaosType.
     */
    private final String key;

    /**
     * Is this strategy enabled?
     */
    private final boolean enabled;

    /**
     * Protected constructor (abstract class).
     *
     * @param config
     *            Configuration to use
     * @param key
     *            Unique key for the ChaosType strategy
     */
    protected ChaosType(MonkeyConfiguration config, String key) {
        // Fail fast on nulls instead of NPE-ing later in an opaque place.
        this.config = Objects.requireNonNull(config, "config");
        this.key = Objects.requireNonNull(key, "key");
        // Deliberate template-method calls into overridable methods: subclasses
        // providing getConfigurationPrefix()/getEnabledDefault() must not rely
        // on their own constructor state, as it has not been initialized yet.
        this.enabled = config.getBoolOrElse(getConfigurationPrefix() + "enabled", getEnabledDefault());
        LOGGER.info("ChaosType: {}: enabled={}", key, enabled);
    }

    /**
     * If not specified, controls whether we default to enabled.
     *
     * Most ChaosTypes should be disabled by default, not least for legacy compatibility, but we want at least one
     * strategy to be available.
     *
     * @return the enablement to assume when no configuration entry is present
     */
    protected boolean getEnabledDefault() {
        return false;
    }

    /**
     * Returns the configuration key prefix to use for this strategy.
     *
     * @return a prefix of the form {@code simianarmy.chaos.<key>.}
     */
    protected String getConfigurationPrefix() {
        // Locale.ROOT keeps configuration keys stable regardless of the JVM's
        // default locale (e.g. the Turkish locale lower-cases 'I' to a dotless
        // 'ı', which would silently break lookups for keys such as "BurnIO").
        return "simianarmy.chaos." + key.toLowerCase(Locale.ROOT) + ".";
    }

    /**
     * Returns the unique key for the ChaosType.
     *
     * @return the strategy key, as passed to the constructor
     */
    public String getKey() {
        return key;
    }

    /**
     * Checks if this chaos type can be applied to the given instance.
     *
     * For example, if the strategy was to detach all the EBS volumes, that only makes sense if there are EBS volumes to
     * detach.
     *
     * @param instance candidate instance
     * @return true if this strategy is enabled and applicable to the instance
     */
    public boolean canApply(ChaosInstance instance) {
        return isEnabled();
    }

    /**
     * Returns whether we are enabled.
     *
     * @return true if this strategy is enabled in configuration
     */
    public boolean isEnabled() {
        return enabled;
    }

    /**
     * Applies this chaos type to the specified instance.
     *
     * @param instance the instance to wreak havoc on
     */
    public abstract void apply(ChaosInstance instance);

    /**
     * Returns the ChaosType with the matching key.
     *
     * @param all candidate strategies to search
     * @param chaosTypeName key to look up (matched case-insensitively)
     * @return the matching strategy
     * @throws IllegalArgumentException if no strategy matches
     */
    public static ChaosType parse(List<ChaosType> all, String chaosTypeName) {
        for (ChaosType chaosType : all) {
            if (chaosType.getKey().equalsIgnoreCase(chaosTypeName)) {
                return chaosType;
            }
        }
        throw new IllegalArgumentException("Unknown chaos type value: "
                + chaosTypeName);
    }

    /**
     * Returns whether chaos types that cost money are allowed.
     *
     * @return true if money-burning strategies are globally permitted
     */
    protected boolean isBurnMoneyEnabled() {
        return config.getBoolOrElse("simianarmy.chaos.burnmoney", false);
    }

    /**
     * Checks whether the root volume of the specified instance is on EBS.
     *
     * @param instance id of instance
     * @return true iff root is on EBS
     */
    protected boolean isRootVolumeEbs(ChaosInstance instance) {
        CloudClient cloudClient = instance.getCloudClient();
        String instanceId = instance.getInstanceId();
        // If counting the root volume changes the number of attached EBS
        // volumes, the root itself must be EBS-backed.
        List<String> withRoot = cloudClient.listAttachedVolumes(instanceId, true);
        List<String> withoutRoot = cloudClient.listAttachedVolumes(instanceId, false);
        return (withRoot.size() != withoutRoot.size());
    }
}
| 4,899 |