comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
The deployment will still be counted as a version change even though it also changes the revision, yes. It's not that stealthy, though, since you can see it in what the jobs are doing — which is why I suggest we just remove the ApplicationChange thing: it was the best we had before we tracked the jobs like this, but now it is just a sometimes-incorrect summary of that information.
/**
 * Scenario test for the upgrader maintainer: walks a fleet of applications with
 * different upgrade policies ("canary", "default", "conservative") through system
 * versions 5.0 -> 5.5 and verifies, at each step, which upgrade jobs get scheduled,
 * how version confidence (normal/high/broken) evolves from the upgrade outcomes,
 * and how job failures cause retries or fall-back to an older target version.
 * NOTE(review): job-count expectations are tied to DeploymentTester's simulated
 * build system and config server — confirm against DeploymentTester before changing.
 */
public void testUpgrading() {
    DeploymentTester tester = new DeploymentTester();
    Version version = Version.fromString("5.0");
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("No applications: Nothing to do", 0, tester.buildSystem().jobs().size());

    // Initial fleet on 5.0: two canaries, three defaults, one conservative.
    Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
    Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
    Application default0 = tester.createAndDeploy("default0", 3, "default");
    Application default1 = tester.createAndDeploy("default1", 4, "default");
    Application default2 = tester.createAndDeploy("default2", 5, "default");
    Application conservative0 = tester.createAndDeploy("conservative0", 6, "conservative");
    tester.upgrader().maintain();
    assertEquals("All already on the right version: Nothing to do", 0, tester.buildSystem().jobs().size());

    // --- 5.1: a clean upgrade wave rolls out tier by tier (canary -> default -> conservative).
    version = Version.fromString("5.1");
    tester.updateVersionStatus(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary0, version, "canary");
    assertEquals(version, tester.configServer().lastPrepareVersion().get());
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("One canary pending; nothing else", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary1, version, "canary");
    tester.updateVersionStatus(version);
    // Both canaries succeeded: confidence reaches normal, unlocking the default tier.
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Canaries done: Should upgrade defaults", 3, tester.buildSystem().jobs().size());
    tester.completeUpgrade(default0, version, "default");
    tester.completeUpgrade(default1, version, "default");
    tester.completeUpgrade(default2, version, "default");
    tester.updateVersionStatus(version);
    // All defaults succeeded: confidence rises to high, unlocking the conservative tier.
    assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Normals done: Should upgrade conservatives", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(conservative0, version, "conservative");
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("Nothing to do", 0, tester.buildSystem().jobs().size());

    // --- 5.2: a canary fails, marking the version broken; only canaries keep retrying.
    version = Version.fromString("5.2");
    tester.updateVersionStatus(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
    tester.completeUpgradeWithError(canary0, version, "canary", DeploymentJobs.JobType.stagingTest);
    assertEquals("Other Canary was cancelled", 2, tester.buildSystem().jobs().size());
    tester.updateVersionStatus(version);
    assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Version broken, but Canaries should keep trying", 2, tester.buildSystem().jobs().size());
    tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, canary1, false);
    tester.clock().advance(Duration.ofHours(1)); // presumably lets a retry back-off elapse — TODO confirm the period
    tester.deployAndNotify(canary0, DeploymentTester.applicationPackage("canary"), false, DeploymentJobs.JobType.stagingTest);
    tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, canary1, false);

    // --- 5.3: supersedes broken 5.2; the wave completes despite one default needing a retry.
    version = Version.fromString("5.3");
    tester.updateVersionStatus(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary0, version, "canary");
    assertEquals(version, tester.configServer().lastPrepareVersion().get());
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("One canary pending; nothing else", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary1, version, "canary");
    tester.updateVersionStatus(version);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Canaries done: Should upgrade defaults", 3, tester.buildSystem().jobs().size());
    tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgrade(default1, version, "default");
    tester.completeUpgrade(default2, version, "default");
    tester.updateVersionStatus(version);
    // One failure among the defaults keeps confidence at normal (neither broken nor high).
    assertEquals("Not enough evidence to mark this as neither broken nor high",
                 VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    assertEquals("Upgrade with error should retry", 1, tester.buildSystem().jobs().size());
    tester.clock().advance(Duration.ofHours(1));
    tester.notifyJobCompletion(DeploymentJobs.JobType.stagingTest, default0, false);
    tester.deployCompletely("default0");
    tester.upgrader().maintain();
    tester.deployCompletely("default0");
    tester.updateVersionStatus(version);
    assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Normals done: Should upgrade conservatives", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(conservative0, version, "conservative");
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("Applications are on 5.3 - nothing to do", 0, tester.buildSystem().jobs().size());

    // --- 5.4: two more defaults join the fleet; all five defaults are targeted at 5.4.
    Version version54 = Version.fromString("5.4");
    Application default3 = tester.createAndDeploy("default3", 5, "default");
    Application default4 = tester.createAndDeploy("default4", 5, "default");
    tester.updateVersionStatus(version54);
    tester.upgrader().maintain();
    tester.completeUpgrade(canary0, version54, "canary");
    tester.completeUpgrade(canary1, version54, "canary");
    tester.updateVersionStatus(version54);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Upgrade of defaults are scheduled", 5, tester.buildSystem().jobs().size());
    assertEquals(version54, ((Change.VersionChange)tester.application(default0.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default1.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default2.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default3.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default4.id()).deploying().get()).version());
    tester.completeUpgrade(default0, version54, "default");

    // --- 5.5 arrives mid-wave: only default0 (already done on 5.4) is retargeted to 5.5;
    // the defaults still working on 5.4 keep their in-flight 5.4 change.
    Version version55 = Version.fromString("5.5");
    tester.updateVersionStatus(version55);
    tester.upgrader().maintain();
    tester.completeUpgrade(canary0, version55, "canary");
    tester.completeUpgrade(canary1, version55, "canary");
    tester.updateVersionStatus(version55);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Upgrade of defaults are scheduled", 5, tester.buildSystem().jobs().size());
    assertEquals(version55, ((Change.VersionChange)tester.application(default0.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default1.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default2.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default3.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default4.id()).deploying().get()).version());
    tester.completeUpgrade(default1, version54, "default");
    tester.completeUpgrade(default2, version54, "default");
    tester.completeUpgradeWithError(default3, version54, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default4, version54, "default", DeploymentJobs.JobType.productionUsWest1);
    tester.upgrader().maintain();
    tester.completeUpgradeWithError(default0, version55, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default1, version55, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default2, version55, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default3, version55, "default", DeploymentJobs.JobType.productionUsWest1);
    tester.updateVersionStatus(version55);
    // Widespread failures on 5.5 mark it broken; after the clock advance, default3
    // (which failed on both 5.4 and 5.5) is rescheduled onto the older version 5.4.
    assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.clock().advance(Duration.ofHours(1));
    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, default3, false);
    tester.upgrader().maintain();
    assertEquals("Upgrade of defaults are scheduled on 5.4 instead, since 5.5 broken: " +
                 "This is default3 since it failed upgrade on both 5.4 and 5.5",
                 1, tester.buildSystem().jobs().size());
    assertEquals("5.4", ((Change.VersionChange)tester.application(default3.id()).deploying().get()).version().toString());
}
/**
 * Scenario test for the upgrader maintainer: walks a fleet of applications with
 * different upgrade policies ("canary", "default", "conservative") through system
 * versions 5.0 -> 5.5 and verifies, at each step, which upgrade jobs get scheduled,
 * how version confidence (normal/high/broken) evolves from the upgrade outcomes,
 * and how job failures cause retries or fall-back to an older target version.
 * NOTE(review): job-count expectations are tied to DeploymentTester's simulated
 * build system and config server — confirm against DeploymentTester before changing.
 */
public void testUpgrading() {
    DeploymentTester tester = new DeploymentTester();
    Version version = Version.fromString("5.0");
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("No applications: Nothing to do", 0, tester.buildSystem().jobs().size());

    // Initial fleet on 5.0: two canaries, three defaults, one conservative.
    Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
    Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
    Application default0 = tester.createAndDeploy("default0", 3, "default");
    Application default1 = tester.createAndDeploy("default1", 4, "default");
    Application default2 = tester.createAndDeploy("default2", 5, "default");
    Application conservative0 = tester.createAndDeploy("conservative0", 6, "conservative");
    tester.upgrader().maintain();
    assertEquals("All already on the right version: Nothing to do", 0, tester.buildSystem().jobs().size());

    // --- 5.1: a clean upgrade wave rolls out tier by tier (canary -> default -> conservative).
    version = Version.fromString("5.1");
    tester.updateVersionStatus(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary0, version, "canary");
    assertEquals(version, tester.configServer().lastPrepareVersion().get());
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("One canary pending; nothing else", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary1, version, "canary");
    tester.updateVersionStatus(version);
    // Both canaries succeeded: confidence reaches normal, unlocking the default tier.
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Canaries done: Should upgrade defaults", 3, tester.buildSystem().jobs().size());
    tester.completeUpgrade(default0, version, "default");
    tester.completeUpgrade(default1, version, "default");
    tester.completeUpgrade(default2, version, "default");
    tester.updateVersionStatus(version);
    // All defaults succeeded: confidence rises to high, unlocking the conservative tier.
    assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Normals done: Should upgrade conservatives", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(conservative0, version, "conservative");
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("Nothing to do", 0, tester.buildSystem().jobs().size());

    // --- 5.2: a canary fails, marking the version broken; only canaries keep retrying.
    version = Version.fromString("5.2");
    tester.updateVersionStatus(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
    tester.completeUpgradeWithError(canary0, version, "canary", DeploymentJobs.JobType.stagingTest);
    assertEquals("Other Canary was cancelled", 2, tester.buildSystem().jobs().size());
    tester.updateVersionStatus(version);
    assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Version broken, but Canaries should keep trying", 2, tester.buildSystem().jobs().size());
    tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, canary1, false);
    tester.clock().advance(Duration.ofHours(1)); // presumably lets a retry back-off elapse — TODO confirm the period
    tester.deployAndNotify(canary0, DeploymentTester.applicationPackage("canary"), false, DeploymentJobs.JobType.stagingTest);
    tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, canary1, false);

    // --- 5.3: supersedes broken 5.2; the wave completes despite one default needing a retry.
    version = Version.fromString("5.3");
    tester.updateVersionStatus(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary0, version, "canary");
    assertEquals(version, tester.configServer().lastPrepareVersion().get());
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("One canary pending; nothing else", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(canary1, version, "canary");
    tester.updateVersionStatus(version);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Canaries done: Should upgrade defaults", 3, tester.buildSystem().jobs().size());
    tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgrade(default1, version, "default");
    tester.completeUpgrade(default2, version, "default");
    tester.updateVersionStatus(version);
    // One failure among the defaults keeps confidence at normal (neither broken nor high).
    assertEquals("Not enough evidence to mark this as neither broken nor high",
                 VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    assertEquals("Upgrade with error should retry", 1, tester.buildSystem().jobs().size());
    tester.clock().advance(Duration.ofHours(1));
    tester.notifyJobCompletion(DeploymentJobs.JobType.stagingTest, default0, false);
    tester.deployCompletely("default0");
    tester.upgrader().maintain();
    tester.deployCompletely("default0");
    tester.updateVersionStatus(version);
    assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Normals done: Should upgrade conservatives", 1, tester.buildSystem().jobs().size());
    tester.completeUpgrade(conservative0, version, "conservative");
    tester.updateVersionStatus(version);
    tester.upgrader().maintain();
    assertEquals("Applications are on 5.3 - nothing to do", 0, tester.buildSystem().jobs().size());

    // --- 5.4: two more defaults join the fleet; all five defaults are targeted at 5.4.
    Version version54 = Version.fromString("5.4");
    Application default3 = tester.createAndDeploy("default3", 5, "default");
    Application default4 = tester.createAndDeploy("default4", 5, "default");
    tester.updateVersionStatus(version54);
    tester.upgrader().maintain();
    tester.completeUpgrade(canary0, version54, "canary");
    tester.completeUpgrade(canary1, version54, "canary");
    tester.updateVersionStatus(version54);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Upgrade of defaults are scheduled", 5, tester.buildSystem().jobs().size());
    assertEquals(version54, ((Change.VersionChange)tester.application(default0.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default1.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default2.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default3.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default4.id()).deploying().get()).version());
    tester.completeUpgrade(default0, version54, "default");

    // --- 5.5 arrives mid-wave: only default0 (already done on 5.4) is retargeted to 5.5;
    // the defaults still working on 5.4 keep their in-flight 5.4 change.
    Version version55 = Version.fromString("5.5");
    tester.updateVersionStatus(version55);
    tester.upgrader().maintain();
    tester.completeUpgrade(canary0, version55, "canary");
    tester.completeUpgrade(canary1, version55, "canary");
    tester.updateVersionStatus(version55);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    assertEquals("Upgrade of defaults are scheduled", 5, tester.buildSystem().jobs().size());
    assertEquals(version55, ((Change.VersionChange)tester.application(default0.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default1.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default2.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default3.id()).deploying().get()).version());
    assertEquals(version54, ((Change.VersionChange)tester.application(default4.id()).deploying().get()).version());
    tester.completeUpgrade(default1, version54, "default");
    tester.completeUpgrade(default2, version54, "default");
    tester.completeUpgradeWithError(default3, version54, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default4, version54, "default", DeploymentJobs.JobType.productionUsWest1);
    tester.upgrader().maintain();
    tester.completeUpgradeWithError(default0, version55, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default1, version55, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default2, version55, "default", DeploymentJobs.JobType.stagingTest);
    tester.completeUpgradeWithError(default3, version55, "default", DeploymentJobs.JobType.productionUsWest1);
    tester.updateVersionStatus(version55);
    // Widespread failures on 5.5 mark it broken; after the clock advance, default3
    // (which failed on both 5.4 and 5.5) is rescheduled onto the older version 5.4.
    assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.clock().advance(Duration.ofHours(1));
    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, default3, false);
    tester.upgrader().maintain();
    assertEquals("Upgrade of defaults are scheduled on 5.4 instead, since 5.5 broken: " +
                 "This is default3 since it failed upgrade on both 5.4 and 5.5",
                 1, tester.buildSystem().jobs().size());
    assertEquals("5.4", ((Change.VersionChange)tester.application(default3.id()).deploying().get()).version().toString());
}
class UpgraderTest { @Test @Test public void testUpgradingToVersionWhichBreaksSomeNonCanaries() { DeploymentTester tester = new DeploymentTester(); tester.upgrader().maintain(); assertEquals("No system version: Nothing to do", 0, tester.buildSystem().jobs().size()); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertEquals("No applications: Nothing to do", 0, tester.buildSystem().jobs().size()); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); Application default5 = tester.createAndDeploy("default5", 8, "default"); Application default6 = tester.createAndDeploy("default6", 9, "default"); Application default7 = tester.createAndDeploy("default7", 10, "default"); Application default8 = tester.createAndDeploy("default8", 11, "default"); Application default9 = tester.createAndDeploy("default9", 12, "default"); tester.upgrader().maintain(); assertEquals("All already on the right version: Nothing to do", 0, tester.buildSystem().jobs().size()); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size()); tester.completeUpgrade(canary0, version, "canary"); assertEquals(version, tester.configServer().lastPrepareVersion().get()); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertEquals("One canary 
pending; nothing else", 1, tester.buildSystem().jobs().size()); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertEquals("Canaries done: Should upgrade defaults", 10, tester.buildSystem().jobs().size()); tester.completeUpgrade(default0, version, "default"); tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default4, version, "default", DeploymentJobs.JobType.systemTest); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence()); assertEquals("Upgrades are cancelled", 0, tester.buildSystem().jobs().size()); } @Test public void testDeploymentAlreadyInProgressForUpgrade() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .environment(Environment.prod) .region("us-east-3") .build(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsEast3); tester.upgrader().maintain(); assertEquals("Application is on expected version: Nothing to do", 0, 
tester.buildSystem().jobs().size()); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, false, DeploymentJobs.JobType.stagingTest); tester.buildSystem().takeJobsToRun(); tester.clock().advance(Duration.ofMinutes(10)); tester.notifyJobCompletion(DeploymentJobs.JobType.stagingTest, app, false); assertTrue("Retries exhausted", tester.buildSystem().jobs().isEmpty()); assertTrue("Failure is recorded", tester.application(app.id()).deploymentJobs().hasFailures()); assertTrue("Application has pending change", tester.application(app.id()).deploying().isPresent()); version = Version.fromString("5.2"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); assertTrue("Application still has failures", tester.application(app.id()).deploymentJobs().hasFailures()); assertEquals(1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); tester.upgrader().maintain(); assertTrue("No more jobs triggered at this time", tester.buildSystem().jobs().isEmpty()); } @Test public void testUpgradeCancelledWithDeploymentInProgress() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, 
"default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertEquals("Upgrade scheduled for remaining apps", 5, tester.buildSystem().jobs().size()); tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertFalse("No change present", tester.applications().require(default4.id()).deploying().isPresent()); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default4, true); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test public void testConfidenceIgnoresFailingApplicationChanges() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); 
Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); tester.completeUpgrade(default0, version, "default"); tester.completeUpgrade(default1, version, "default"); tester.completeUpgrade(default2, version, "default"); tester.completeUpgrade(default3, version, "default"); tester.completeUpgrade(default4, version, "default"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence()); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default0, false); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default1, false); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default2, true); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default3, true); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default2, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default3, false); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); } @Test public void testBlockVersionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T18:00:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); Version version = 
Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(false, true, "tue", "18-19", "UTC") .region("us-west-1") .build(); Application app = tester.createAndDeploy("app1", 1, applicationPackage); version = Version.fromString("5.1"); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().maintain(); assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().maintain(); assertFalse("Job is scheduled", tester.buildSystem().jobs().isEmpty()); tester.completeUpgrade(app, version, "canary"); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test public void testBlockVersionChangeHalfwayThough() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:00:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); BlockedChangeDeployer blockedChangeDeployer = new BlockedChangeDeployer(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(false, true, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); Application app = tester.createAndDeploy("app1", 1, applicationPackage); version = Version.fromString("5.1"); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); clock.advance(Duration.ofHours(1)); 
tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsWest1); assertTrue(tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); blockedChangeDeployer.maintain(); assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); blockedChangeDeployer.maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsCentral1); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsEast3); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } /** * Tests the scenario where a release is deployed to 2 of 3 production zones, then blocked, * followed by timeout of the upgrade and a new release. * In this case, the blocked production zone should not progress with upgrading to the previous version, * and should not upgrade to the new version until the other production zones have it * (expected behavior; both requirements are debatable). 
*/ @Test public void testBlockVersionChangeHalfwayThoughThenNewVersion() { ManualClock clock = new ManualClock(Instant.parse("2017-09-29T16:00:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); BlockedChangeDeployer blockedChangeDeployer = new BlockedChangeDeployer(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(false, true, "mon-fri", "00-09,17-23", "UTC") .blockChange(false, true, "sat-sun", "00-23", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); Application app = tester.createAndDeploy("app1", 1, applicationPackage); version = Version.fromString("5.1"); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsWest1); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsCentral1); assertTrue(tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofDays(1)); version = Version.fromString("5.2"); tester.updateVersionStatus(version); tester.upgrader().maintain(); blockedChangeDeployer.maintain(); assertTrue("Nothing is scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofDays(1)); tester.clock().advance(Duration.ofHours(17)); tester.upgrader().maintain(); blockedChangeDeployer.maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); 
tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsWest1); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsCentral1); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsEast3); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); for (Deployment deployment : tester.applications().require(app.id()).deployments().values()) assertEquals(version, deployment.version()); } @Test public void testReschedulesUpgradeAfterTimeout() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-west-1") .build(); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); assertEquals(version, default0.deployedVersion().get()); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.clock().advance(Duration.ofMinutes(1)); tester.upgrader().maintain(); assertEquals("Upgrade scheduled for remaining apps", 5, 
tester.buildSystem().jobs().size()); tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); tester.clock().advance(Duration.ofHours(1)); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default0, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default1, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default2, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default3, false); Application deadLocked = tester.applications().require(default4.id()); assertTrue("Jobs in progress", deadLocked.deploymentJobs().isRunning(tester.controller().applications().deploymentTrigger().jobTimeoutLimit())); assertFalse("No change present", deadLocked.deploying().isPresent()); tester.deployCompletely(default0, applicationPackage); tester.deployCompletely(default1, applicationPackage); tester.deployCompletely(default2, applicationPackage); tester.deployCompletely(default3, applicationPackage); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertEquals("Upgrade scheduled for previously failing apps", 4, tester.buildSystem().jobs().size()); tester.completeUpgrade(default0, version, "default"); tester.completeUpgrade(default1, version, "default"); tester.completeUpgrade(default2, version, "default"); tester.completeUpgrade(default3, version, "default"); 
assertEquals(version, tester.application(default0.id()).deployedVersion().get()); assertEquals(version, tester.application(default1.id()).deployedVersion().get()); assertEquals(version, tester.application(default2.id()).deployedVersion().get()); assertEquals(version, tester.application(default3.id()).deployedVersion().get()); } @Test public void testThrottlesUpgrades() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Upgrader upgrader = new Upgrader(tester.controller(), Duration.ofMinutes(10), new JobControl(tester.controllerTester().curator()), tester.controllerTester().curator()); upgrader.setUpgradesPerMinute(0.2); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application dev0 = tester.createApplication("dev0", "tenant1", 7, 1L); tester.controllerTester().deploy(dev0, new Zone(Environment.dev, RegionName.from("dev-region"))); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); upgrader.maintain(); assertEquals(2, tester.buildSystem().jobs().size()); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); upgrader.maintain(); assertEquals(2, tester.buildSystem().jobs().size()); tester.completeUpgrade(default0, version, "default"); tester.completeUpgrade(default2, version, "default"); upgrader.maintain(); assertEquals(2, tester.buildSystem().jobs().size()); tester.completeUpgrade(default1, version, 
"default"); tester.completeUpgrade(default3, version, "default"); upgrader.maintain(); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } }
class UpgraderTest { @Test @Test public void testUpgradingToVersionWhichBreaksSomeNonCanaries() { DeploymentTester tester = new DeploymentTester(); tester.upgrader().maintain(); assertEquals("No system version: Nothing to do", 0, tester.buildSystem().jobs().size()); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertEquals("No applications: Nothing to do", 0, tester.buildSystem().jobs().size()); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); Application default5 = tester.createAndDeploy("default5", 8, "default"); Application default6 = tester.createAndDeploy("default6", 9, "default"); Application default7 = tester.createAndDeploy("default7", 10, "default"); Application default8 = tester.createAndDeploy("default8", 11, "default"); Application default9 = tester.createAndDeploy("default9", 12, "default"); tester.upgrader().maintain(); assertEquals("All already on the right version: Nothing to do", 0, tester.buildSystem().jobs().size()); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size()); tester.completeUpgrade(canary0, version, "canary"); assertEquals(version, tester.configServer().lastPrepareVersion().get()); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertEquals("One canary 
pending; nothing else", 1, tester.buildSystem().jobs().size()); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertEquals("Canaries done: Should upgrade defaults", 10, tester.buildSystem().jobs().size()); tester.completeUpgrade(default0, version, "default"); tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default4, version, "default", DeploymentJobs.JobType.systemTest); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence()); assertEquals("Upgrades are cancelled", 0, tester.buildSystem().jobs().size()); } @Test public void testDeploymentAlreadyInProgressForUpgrade() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .environment(Environment.prod) .region("us-east-3") .build(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsEast3); tester.upgrader().maintain(); assertEquals("Application is on expected version: Nothing to do", 0, 
tester.buildSystem().jobs().size()); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, false, DeploymentJobs.JobType.stagingTest); tester.buildSystem().takeJobsToRun(); tester.clock().advance(Duration.ofMinutes(10)); tester.notifyJobCompletion(DeploymentJobs.JobType.stagingTest, app, false); assertTrue("Retries exhausted", tester.buildSystem().jobs().isEmpty()); assertTrue("Failure is recorded", tester.application(app.id()).deploymentJobs().hasFailures()); assertTrue("Application has pending change", tester.application(app.id()).deploying().isPresent()); version = Version.fromString("5.2"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); assertTrue("Application still has failures", tester.application(app.id()).deploymentJobs().hasFailures()); assertEquals(1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); tester.upgrader().maintain(); assertTrue("No more jobs triggered at this time", tester.buildSystem().jobs().isEmpty()); } @Test public void testUpgradeCancelledWithDeploymentInProgress() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, 
"default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertEquals("Upgrade scheduled for remaining apps", 5, tester.buildSystem().jobs().size()); tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertFalse("No change present", tester.applications().require(default4.id()).deploying().isPresent()); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default4, true); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test public void testConfidenceIgnoresFailingApplicationChanges() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); 
Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); tester.completeUpgrade(default0, version, "default"); tester.completeUpgrade(default1, version, "default"); tester.completeUpgrade(default2, version, "default"); tester.completeUpgrade(default3, version, "default"); tester.completeUpgrade(default4, version, "default"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence()); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default0, false); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default1, false); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default2, true); tester.notifyJobCompletion(DeploymentJobs.JobType.component, default3, true); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default2, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default3, false); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); } @Test public void testBlockVersionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T18:00:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); Version version = 
Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(false, true, "tue", "18-19", "UTC") .region("us-west-1") .build(); Application app = tester.createAndDeploy("app1", 1, applicationPackage); version = Version.fromString("5.1"); tester.updateVersionStatus(version); tester.upgrader().maintain(); assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().maintain(); assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().maintain(); assertFalse("Job is scheduled", tester.buildSystem().jobs().isEmpty()); tester.completeUpgrade(app, version, "canary"); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test public void testBlockVersionChangeHalfwayThough() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:00:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); BlockedChangeDeployer blockedChangeDeployer = new BlockedChangeDeployer(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(false, true, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); Application app = tester.createAndDeploy("app1", 1, applicationPackage); version = Version.fromString("5.1"); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); clock.advance(Duration.ofHours(1)); 
tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsWest1); assertTrue(tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); blockedChangeDeployer.maintain(); assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofHours(1)); blockedChangeDeployer.maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsCentral1); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsEast3); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } /** * Tests the scenario where a release is deployed to 2 of 3 production zones, then blocked, * followed by timeout of the upgrade and a new release. * In this case, the blocked production zone should not progress with upgrading to the previous version, * and should not upgrade to the new version until the other production zones have it * (expected behavior; both requirements are debatable). 
*/ @Test public void testBlockVersionChangeHalfwayThoughThenNewVersion() { ManualClock clock = new ManualClock(Instant.parse("2017-09-29T16:00:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); BlockedChangeDeployer blockedChangeDeployer = new BlockedChangeDeployer(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(false, true, "mon-fri", "00-09,17-23", "UTC") .blockChange(false, true, "sat-sun", "00-23", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); Application app = tester.createAndDeploy("app1", 1, applicationPackage); version = Version.fromString("5.1"); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsWest1); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsCentral1); assertTrue(tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofDays(1)); version = Version.fromString("5.2"); tester.updateVersionStatus(version); tester.upgrader().maintain(); blockedChangeDeployer.maintain(); assertTrue("Nothing is scheduled", tester.buildSystem().jobs().isEmpty()); tester.clock().advance(Duration.ofDays(1)); tester.clock().advance(Duration.ofHours(17)); tester.upgrader().maintain(); blockedChangeDeployer.maintain(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); 
tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsWest1); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsCentral1); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsEast3); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); for (Deployment deployment : tester.applications().require(app.id()).deployments().values()) assertEquals(version, deployment.version()); } @Test public void testReschedulesUpgradeAfterTimeout() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-west-1") .build(); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application default4 = tester.createAndDeploy("default4", 7, "default"); assertEquals(version, default0.deployedVersion().get()); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); tester.upgrader().maintain(); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.clock().advance(Duration.ofMinutes(1)); tester.upgrader().maintain(); assertEquals("Upgrade scheduled for remaining apps", 5, 
tester.buildSystem().jobs().size()); tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest); tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); tester.clock().advance(Duration.ofHours(1)); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default0, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default1, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default2, false); tester.notifyJobCompletion(DeploymentJobs.JobType.systemTest, default3, false); Application deadLocked = tester.applications().require(default4.id()); assertTrue("Jobs in progress", deadLocked.deploymentJobs().isRunning(tester.controller().applications().deploymentTrigger().jobTimeoutLimit())); assertFalse("No change present", deadLocked.deploying().isPresent()); tester.deployCompletely(default0, applicationPackage); tester.deployCompletely(default1, applicationPackage); tester.deployCompletely(default2, applicationPackage); tester.deployCompletely(default3, applicationPackage); tester.updateVersionStatus(version); assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence()); tester.upgrader().maintain(); assertEquals("Upgrade scheduled for previously failing apps", 4, tester.buildSystem().jobs().size()); tester.completeUpgrade(default0, version, "default"); tester.completeUpgrade(default1, version, "default"); tester.completeUpgrade(default2, version, "default"); tester.completeUpgrade(default3, version, "default"); 
assertEquals(version, tester.application(default0.id()).deployedVersion().get()); assertEquals(version, tester.application(default1.id()).deployedVersion().get()); assertEquals(version, tester.application(default2.id()).deployedVersion().get()); assertEquals(version, tester.application(default3.id()).deployedVersion().get()); } @Test public void testThrottlesUpgrades() { DeploymentTester tester = new DeploymentTester(); Version version = Version.fromString("5.0"); tester.updateVersionStatus(version); Upgrader upgrader = new Upgrader(tester.controller(), Duration.ofMinutes(10), new JobControl(tester.controllerTester().curator()), tester.controllerTester().curator()); upgrader.setUpgradesPerMinute(0.2); Application canary0 = tester.createAndDeploy("canary0", 1, "canary"); Application canary1 = tester.createAndDeploy("canary1", 2, "canary"); Application default0 = tester.createAndDeploy("default0", 3, "default"); Application default1 = tester.createAndDeploy("default1", 4, "default"); Application default2 = tester.createAndDeploy("default2", 5, "default"); Application default3 = tester.createAndDeploy("default3", 6, "default"); Application dev0 = tester.createApplication("dev0", "tenant1", 7, 1L); tester.controllerTester().deploy(dev0, new Zone(Environment.dev, RegionName.from("dev-region"))); version = Version.fromString("5.1"); tester.updateVersionStatus(version); assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber()); upgrader.maintain(); assertEquals(2, tester.buildSystem().jobs().size()); tester.completeUpgrade(canary0, version, "canary"); tester.completeUpgrade(canary1, version, "canary"); tester.updateVersionStatus(version); upgrader.maintain(); assertEquals(2, tester.buildSystem().jobs().size()); tester.completeUpgrade(default0, version, "default"); tester.completeUpgrade(default2, version, "default"); upgrader.maintain(); assertEquals(2, tester.buildSystem().jobs().size()); tester.completeUpgrade(default1, version, 
"default"); tester.completeUpgrade(default3, version, "default"); upgrader.maintain(); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } }
Consider having `JcaX509CertificateConverter` as instance field
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { assertCertificateCommonName(certReq.getSubject(), remoteHostname); assertCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIIFICATE_DURATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, false, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return new JcaX509CertificateConverter() .setProvider(new BouncyCastleProvider()) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
.setProvider(new BouncyCastleProvider())
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { verifyCertificateCommonName(certReq.getSubject(), remoteHostname); verifyCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIFICATE_EXPIRATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, true, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return certificateConverter .setProvider(provider) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; private static final Duration CERTIIFICATE_DURATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = Arrays.asList( Extension.basicConstraints, Extension.subjectAlternativeName); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } static void assertCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } static void assertCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; static final Duration CERTIFICATE_EXPIRATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = ImmutableList.of( Extension.basicConstraints, Extension.subjectAlternativeName); private final JcaX509CertificateConverter certificateConverter = new JcaX509CertificateConverter(); private final Provider provider = new BouncyCastleProvider(); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } /** * Signs the CSR if: * <ul> * <li>Common Name matches {@code remoteHostname}</li> * <li>CSR does not contain any any of the extensions in {@code ILLEGAL_EXTENSIONS}</li> * </ul> */ static void verifyCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } @SuppressWarnings("unchecked") static void verifyCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
I think this extension should be marked as _critical_.
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { assertCertificateCommonName(certReq.getSubject(), remoteHostname); assertCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIIFICATE_DURATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, false, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return new JcaX509CertificateConverter() .setProvider(new BouncyCastleProvider()) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
.addExtension(Extension.basicConstraints, false, new BasicConstraints(false));
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { verifyCertificateCommonName(certReq.getSubject(), remoteHostname); verifyCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIFICATE_EXPIRATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, true, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return certificateConverter .setProvider(provider) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; private static final Duration CERTIIFICATE_DURATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = Arrays.asList( Extension.basicConstraints, Extension.subjectAlternativeName); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } static void assertCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } static void assertCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; static final Duration CERTIFICATE_EXPIRATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = ImmutableList.of( Extension.basicConstraints, Extension.subjectAlternativeName); private final JcaX509CertificateConverter certificateConverter = new JcaX509CertificateConverter(); private final Provider provider = new BouncyCastleProvider(); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } /** * Signs the CSR if: * <ul> * <li>Common Name matches {@code remoteHostname}</li> * <li>CSR does not contain any any of the extensions in {@code ILLEGAL_EXTENSIONS}</li> * </ul> */ static void verifyCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } @SuppressWarnings("unchecked") static void verifyCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
Fixed.
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { assertCertificateCommonName(certReq.getSubject(), remoteHostname); assertCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIIFICATE_DURATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, false, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return new JcaX509CertificateConverter() .setProvider(new BouncyCastleProvider()) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
.setProvider(new BouncyCastleProvider())
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { verifyCertificateCommonName(certReq.getSubject(), remoteHostname); verifyCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIFICATE_EXPIRATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, true, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return certificateConverter .setProvider(provider) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; private static final Duration CERTIIFICATE_DURATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = Arrays.asList( Extension.basicConstraints, Extension.subjectAlternativeName); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } static void assertCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } static void assertCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; static final Duration CERTIFICATE_EXPIRATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = ImmutableList.of( Extension.basicConstraints, Extension.subjectAlternativeName); private final JcaX509CertificateConverter certificateConverter = new JcaX509CertificateConverter(); private final Provider provider = new BouncyCastleProvider(); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } /** * Signs the CSR if: * <ul> * <li>Common Name matches {@code remoteHostname}</li> * <li>CSR does not contain any any of the extensions in {@code ILLEGAL_EXTENSIONS}</li> * </ul> */ static void verifyCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } @SuppressWarnings("unchecked") static void verifyCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
Fixed.
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { assertCertificateCommonName(certReq.getSubject(), remoteHostname); assertCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIIFICATE_DURATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, false, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return new JcaX509CertificateConverter() .setProvider(new BouncyCastleProvider()) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
.addExtension(Extension.basicConstraints, false, new BasicConstraints(false));
X509Certificate generateX509Certificate(PKCS10CertificationRequest certReq, String remoteHostname) { verifyCertificateCommonName(certReq.getSubject(), remoteHostname); verifyCertificateExtensions(certReq); Date notBefore = Date.from(clock.instant()); Date notAfter = Date.from(clock.instant().plus(CERTIFICATE_EXPIRATION)); try { PublicKey publicKey = new JcaPKCS10CertificationRequest(certReq).getPublicKey(); X509v3CertificateBuilder caBuilder = new JcaX509v3CertificateBuilder( issuer, BigInteger.valueOf(clock.millis()), notBefore, notAfter, certReq.getSubject(), publicKey) .addExtension(Extension.basicConstraints, true, new BasicConstraints(false)); ContentSigner caSigner = new JcaContentSignerBuilder(SIGNER_ALGORITHM).build(caPrivateKey); return certificateConverter .setProvider(provider) .getCertificate(caBuilder.build(caSigner)); } catch (Exception ex) { log.log(LogLevel.ERROR, "Failed to generate X509 Certificate", ex); throw new RuntimeException("Failed to generate X509 Certificate"); } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; private static final Duration CERTIIFICATE_DURATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = Arrays.asList( Extension.basicConstraints, Extension.subjectAlternativeName); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } static void assertCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } static void assertCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
class CertificateSigner { private static final Logger log = Logger.getLogger(CertificateSigner.class.getName()); static final String SIGNER_ALGORITHM = "SHA256withRSA"; static final Duration CERTIFICATE_EXPIRATION = Duration.ofDays(30); private static final List<ASN1ObjectIdentifier> ILLEGAL_EXTENSIONS = ImmutableList.of( Extension.basicConstraints, Extension.subjectAlternativeName); private final JcaX509CertificateConverter certificateConverter = new JcaX509CertificateConverter(); private final Provider provider = new BouncyCastleProvider(); private final PrivateKey caPrivateKey; private final X500Name issuer; private final Clock clock; public CertificateSigner(KeyProvider keyProvider, AthenzProviderServiceConfig.Zones zoneConfig, String configServerHostname) { this(keyProvider.getPrivateKey(zoneConfig.secretVersion()), configServerHostname, Clock.systemUTC()); } CertificateSigner(PrivateKey caPrivateKey, String configServerHostname, Clock clock) { this.caPrivateKey = caPrivateKey; this.issuer = new X500Name("CN=" + configServerHostname); this.clock = clock; } /** * Signs the CSR if: * <ul> * <li>Common Name matches {@code remoteHostname}</li> * <li>CSR does not contain any any of the extensions in {@code ILLEGAL_EXTENSIONS}</li> * </ul> */ static void verifyCertificateCommonName(X500Name subject, String commonName) { List<AttributeTypeAndValue> attributesAndValues = Arrays.stream(subject.getRDNs()) .flatMap(rdn -> rdn.isMultiValued() ? Stream.of(rdn.getTypesAndValues()) : Stream.of(rdn.getFirst())) .filter(attr -> attr.getType() == BCStyle.CN) .collect(Collectors.toList()); if (attributesAndValues.size() != 1) { throw new IllegalArgumentException("Only 1 common name should be set"); } String actualCommonName = DERUTF8String.getInstance(attributesAndValues.get(0).getValue()).getString(); if (! 
actualCommonName.equals(commonName)) { throw new IllegalArgumentException("Expected common name to be " + commonName + ", but was " + actualCommonName); } } @SuppressWarnings("unchecked") static void verifyCertificateExtensions(PKCS10CertificationRequest request) { List<String> illegalExt = Arrays .stream(request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) .map(attribute -> Extensions.getInstance(attribute.getAttrValues().getObjectAt(0))) .flatMap(ext -> Collections.list((Enumeration<ASN1ObjectIdentifier>) ext.oids()).stream()) .filter(ILLEGAL_EXTENSIONS::contains) .map(ASN1ObjectIdentifier::getId) .collect(Collectors.toList()); if (! illegalExt.isEmpty()) { throw new IllegalArgumentException("CSR contains illegal extensions: " + String.join(", ", illegalExt)); } } }
I believe these conditions are the same? The only JobType we ever trigger, which does not have a zone, is the component job, unless we are doing something very wrong.
private Version deployVersionFor(DeploymentJobs.JobType jobType, Controller controller) { return jobType == JobType.component ? controller.systemVersion() : deployVersionIn(jobType.zone(controller.system()).get(), controller); }
: deployVersionIn(jobType.zone(controller.system()).get(), controller);
private Version deployVersionFor(DeploymentJobs.JobType jobType, Controller controller) { return jobType == JobType.component ? controller.systemVersion() : deployVersionIn(jobType.zone(controller.system()).get(), controller); }
class LockedApplication extends Application { private final Lock lock; /** * LockedApplication should be acquired through ApplicationController and never constructed directly * * @param application Application instance for which lock has been acquired * @param lock Unused, but must be held when constructing this */ LockedApplication(Application application, Lock lock) { super(application.id(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.deploying(), application.hasOutstandingChange()); this.lock = Objects.requireNonNull(lock, "lock cannot be null"); } public LockedApplication withProjectId(long projectId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withProjectId(projectId), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(IssueId issueId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().with(issueId), deploying(), hasOutstandingChange()), lock); } public LockedApplication withJobCompletion(DeploymentJobs.JobReport report, Instant notificationTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withCompletion(report, notificationTime, controller), deploying(), hasOutstandingChange()), lock); } public LockedApplication withJobTriggering(DeploymentJobs.JobType type, Optional<Change> change, String reason, Instant triggerTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withTriggering(type, change, deployVersionFor(type, controller), deployRevisionFor(type, controller), reason, triggerTime), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(Deployment deployment) { 
Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.put(deployment.zone(), deployment); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(DeploymentJobs deploymentJobs) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange()), lock); } public LockedApplication withoutDeploymentIn(Zone zone) { Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.remove(zone); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication withoutDeploymentJob(DeploymentJobs.JobType jobType) { DeploymentJobs deploymentJobs = deploymentJobs().without(jobType); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange()), lock); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(new Application(id(), deploymentSpec, validationOverrides(), deployments(), deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides, deployments(), deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication withDeploying(Optional<Change> deploying) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying, hasOutstandingChange()), lock); } public LockedApplication withOutstandingChange(boolean outstandingChange) { return new LockedApplication(new 
Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying(), outstandingChange), lock); } private Optional<ApplicationRevision> deployRevisionFor(DeploymentJobs.JobType jobType, Controller controller) { return jobType == JobType.component ? Optional.empty() : deployRevisionIn(jobType.zone(controller.system()).get()); } /** Returns the revision a new deployment to this zone should use for this application, or empty if we don't know */ private Optional<ApplicationRevision> deployRevisionIn(Zone zone) { if (deploying().isPresent() && deploying().get() instanceof ApplicationChange) return ((Change.ApplicationChange) deploying().get()).revision(); return revisionIn(zone); } /** Returns the revision this application is or should be deployed with in the given zone, or empty if unknown. */ private Optional<ApplicationRevision> revisionIn(Zone zone) { return Optional.ofNullable(deployments().get(zone)).map(Deployment::revision); } }
class LockedApplication extends Application { private final Lock lock; /** * LockedApplication should be acquired through ApplicationController and never constructed directly * * @param application Application instance for which lock has been acquired * @param lock Unused, but must be held when constructing this */ LockedApplication(Application application, Lock lock) { super(application.id(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.deploying(), application.hasOutstandingChange(), application.ownershipIssueId()); this.lock = Objects.requireNonNull(lock, "lock cannot be null"); } public LockedApplication withProjectId(long projectId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withProjectId(projectId), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(IssueId issueId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().with(issueId), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withJobCompletion(DeploymentJobs.JobReport report, Instant notificationTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withCompletion(report, notificationTime, controller), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withJobTriggering(DeploymentJobs.JobType type, Optional<Change> change, String reason, Instant triggerTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withTriggering(type, change, deployVersionFor(type, controller), deployRevisionFor(type, controller), reason, triggerTime), deploying(), 
hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(Deployment deployment) { Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.put(deployment.zone(), deployment); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(DeploymentJobs deploymentJobs) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withoutDeploymentIn(Zone zone) { Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.remove(zone); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withoutDeploymentJob(DeploymentJobs.JobType jobType) { DeploymentJobs deploymentJobs = deploymentJobs().without(jobType); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(new Application(id(), deploymentSpec, validationOverrides(), deployments(), deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides, deployments(), deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withDeploying(Optional<Change> deploying) { return new LockedApplication(new Application(id(), 
deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying, hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withOutstandingChange(boolean outstandingChange) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying(), outstandingChange, ownershipIssueId()), lock); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying(), hasOutstandingChange(), Optional.of(issueId)), lock); } private Optional<ApplicationRevision> deployRevisionFor(DeploymentJobs.JobType jobType, Controller controller) { return jobType == JobType.component ? Optional.empty() : deployRevisionIn(jobType.zone(controller.system()).get()); } /** Returns the revision a new deployment to this zone should use for this application, or empty if we don't know */ private Optional<ApplicationRevision> deployRevisionIn(Zone zone) { if (deploying().isPresent() && deploying().get() instanceof ApplicationChange) return ((Change.ApplicationChange) deploying().get()).revision(); return revisionIn(zone); } /** Returns the revision this application is or should be deployed with in the given zone, or empty if unknown. */ private Optional<ApplicationRevision> revisionIn(Zone zone) { return Optional.ofNullable(deployments().get(zone)).map(Deployment::revision); } }
Inconsistent brace style: some single-statement if bodies are braced while others are not — please pick one convention.
/**
 * Validates that the SSL part of the connector config is internally consistent for the chosen key store type.
 * Called at construction time so that misconfiguration fails fast rather than at first connection.
 *
 * @param config the connector config whose ssl() section is checked
 * @throws IllegalArgumentException if PEM attributes are combined with a JKS store, a required password
 *         or path is missing, or a JKS path is combined with a PEM store
 */
private static void validateSslConfig(ConnectorConfig config) {
    ConnectorConfig.Ssl ssl = config.ssl();
    if (ssl.keyStoreType() == JKS) {
        // PEM key store attributes are mutually exclusive with a JKS key store.
        if (!ssl.pemKeyStore().keyPath().isEmpty() || !ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS.");
        }
        if (ssl.keyDbKey().isEmpty()) {
            throw new IllegalArgumentException("Missing password for JKS keystore");
        }
    }
    if (ssl.keyStoreType() == PEM) {
        if (!ssl.keyStorePath().isEmpty()) {
            throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM");
        }
        // Not fatal: for PEM key stores the password only applies to the trust store.
        if (!ssl.keyDbKey().isEmpty()) {
            log.warning("Encrypted PEM key stores are not supported. Password is only applied to truststore");
        }
        if (ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("Missing certificate path.");
        }
        if (ssl.pemKeyStore().keyPath().isEmpty()) {
            throw new IllegalArgumentException("Missing key path.");
        }
    }
    // A trust store that should use a password needs the password to actually be configured.
    if (!ssl.trustStorePath().isEmpty() && ssl.useTrustStorePassword() && ssl.keyDbKey().isEmpty()) {
        throw new IllegalArgumentException("Missing password for JKS truststore");
    }
}
if (ssl.pemKeyStore().certificatePath().isEmpty()) {
/** Sanity-checks the SSL section of the connector config before any connector is built from it. */
private static void validateSslConfig(ConnectorConfig config) {
    ConnectorConfig.Ssl sslCfg = config.ssl();
    if (sslCfg.keyStoreType() == JKS) {
        boolean pemAttributesPresent = !sslCfg.pemKeyStore().keyPath().isEmpty()
                                       || !sslCfg.pemKeyStore().certificatePath().isEmpty();
        if (pemAttributesPresent) {
            throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS.");
        }
        if (sslCfg.keyDbKey().isEmpty()) {
            throw new IllegalArgumentException("Missing password for JKS keystore");
        }
    }
    if (sslCfg.keyStoreType() == PEM) {
        if (!sslCfg.keyStorePath().isEmpty()) {
            throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM");
        }
        if (!sslCfg.keyDbKey().isEmpty()) {
            // A password on a PEM key store is ignored for the key store itself.
            log.warning("Encrypted PEM key stores are not supported. Password is only applied to truststore");
        }
        if (sslCfg.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("Missing certificate path.");
        }
        if (sslCfg.pemKeyStore().keyPath().isEmpty()) {
            throw new IllegalArgumentException("Missing key path.");
        }
    }
    boolean trustStoreConfigured = !sslCfg.trustStorePath().isEmpty();
    if (trustStoreConfigured && sslCfg.useTrustStorePassword() && sslCfg.keyDbKey().isEmpty()) {
        throw new IllegalArgumentException("Missing password for JKS truststore");
    }
}
class ConnectorFactory { private final static Logger log = Logger.getLogger(ConnectorFactory.class.getName()); private final ConnectorConfig connectorConfig; private final SecretStore secretStore; @Inject public ConnectorFactory(ConnectorConfig connectorConfig, SecretStore secretStore) { this.connectorConfig = connectorConfig; this.secretStore = secretStore; if (connectorConfig.ssl().enabled()) validateSslConfig(connectorConfig); } public ConnectorConfig getConnectorConfig() { return connectorConfig; } public ServerConnector createConnector(final Metric metric, final Server server, final ServerSocketChannel ch) { ServerConnector connector; if (connectorConfig.ssl().enabled()) { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newSslConnectionFactory(), newHttpConnectionFactory()); } else { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newHttpConnectionFactory()); } connector.setPort(connectorConfig.listenPort()); connector.setName(connectorConfig.name()); connector.setAcceptQueueSize(connectorConfig.acceptQueueSize()); connector.setReuseAddress(connectorConfig.reuseAddress()); double soLingerTimeSeconds = connectorConfig.soLingerTime(); if (soLingerTimeSeconds == -1) { connector.setSoLingerTime(-1); } else { connector.setSoLingerTime((int)(soLingerTimeSeconds * 1000.0)); } connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0)); connector.setStopTimeout((long)(connectorConfig.stopTimeout() * 1000.0)); return connector; } private HttpConnectionFactory newHttpConnectionFactory() { HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSendDateHeader(true); httpConfig.setSendServerVersion(false); httpConfig.setSendXPoweredBy(false); httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize()); httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize()); httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize()); 
httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize()); if (connectorConfig.ssl().enabled()) { httpConfig.addCustomizer(new SecureRequestCustomizer()); } return new HttpConnectionFactory(httpConfig); } private SslConnectionFactory newSslConnectionFactory() { Ssl sslConfig = connectorConfig.ssl(); SslContextFactory factory = new SslContextFactory(); switch (sslConfig.clientAuth()) { case NEED_AUTH: factory.setNeedClientAuth(true); break; case WANT_AUTH: factory.setWantClientAuth(true); break; } if (!sslConfig.prng().isEmpty()) { factory.setSecureRandomAlgorithm(sslConfig.prng()); } if (!sslConfig.excludeProtocol().isEmpty()) { String[] prots = new String[sslConfig.excludeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.excludeProtocol(i).name(); } factory.setExcludeProtocols(prots); } if (!sslConfig.includeProtocol().isEmpty()) { String[] prots = new String[sslConfig.includeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.includeProtocol(i).name(); } factory.setIncludeProtocols(prots); } if (!sslConfig.excludeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.excludeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.excludeCipherSuite(i).name(); } factory.setExcludeCipherSuites(ciphs); } if (!sslConfig.includeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.includeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.includeCipherSuite(i).name(); } factory.setIncludeCipherSuites(ciphs); } String keyDbPassword = sslConfig.keyDbKey(); switch (sslConfig.keyStoreType()) { case PEM: factory.setKeyStore(createPemKeyStore(sslConfig.pemKeyStore())); break; case JKS: factory.setKeyStorePath(sslConfig.keyStorePath()); factory.setKeyStoreType(sslConfig.keyStoreType().toString()); factory.setKeyStorePassword(secretStore.getSecret(keyDbPassword)); break; } if (!sslConfig.trustStorePath().isEmpty()) { 
factory.setTrustStorePath(sslConfig.trustStorePath()); factory.setTrustStoreType(sslConfig.trustStoreType().toString()); if (sslConfig.useTrustStorePassword()) { factory.setTrustStorePassword(secretStore.getSecret(keyDbPassword)); } } factory.setKeyManagerFactoryAlgorithm(sslConfig.sslKeyManagerFactoryAlgorithm()); factory.setProtocol(sslConfig.protocol()); return new SslConnectionFactory(factory, HttpVersion.HTTP_1_1.asString()); } private static KeyStore createPemKeyStore(PemKeyStore pemKeyStore) { try { Path certificatePath = Paths.get(pemKeyStore.certificatePath()); Path keyPath = Paths.get(pemKeyStore.keyPath()); return new PemSslKeyStore(certificatePath, keyPath) .loadJavaKeyStore(); } catch (IOException e) { throw new UncheckedIOException(e); } catch (Exception e) { throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e); } } }
/**
 * Builds Jetty {@link ServerConnector}s from a {@link ConnectorConfig}, including the TLS
 * connection factory when SSL is enabled. SSL settings are validated in the constructor.
 */
class ConnectorFactory {

    private final static Logger log = Logger.getLogger(ConnectorFactory.class.getName());

    private final ConnectorConfig connectorConfig;
    private final SecretStore secretStore;

    @Inject
    public ConnectorFactory(ConnectorConfig connectorConfig, SecretStore secretStore) {
        this.connectorConfig = connectorConfig;
        this.secretStore = secretStore;
        // Fail fast on inconsistent SSL settings instead of at first connection.
        if (connectorConfig.ssl().enabled()) validateSslConfig(connectorConfig);
    }

    public ConnectorConfig getConnectorConfig() {
        return connectorConfig;
    }

    /** Creates a configured server connector; the TLS factory is placed first when SSL is enabled. */
    public ServerConnector createConnector(final Metric metric, final Server server, final ServerSocketChannel ch) {
        ServerConnector connector;
        if (connectorConfig.ssl().enabled()) {
            connector = new JDiscServerConnector(connectorConfig, metric, server, ch,
                                                 newSslConnectionFactory(), newHttpConnectionFactory());
        } else {
            connector = new JDiscServerConnector(connectorConfig, metric, server, ch,
                                                 newHttpConnectionFactory());
        }
        connector.setPort(connectorConfig.listenPort());
        connector.setName(connectorConfig.name());
        connector.setAcceptQueueSize(connectorConfig.acceptQueueSize());
        connector.setReuseAddress(connectorConfig.reuseAddress());
        // -1 disables SO_LINGER; otherwise the config value (seconds) is converted to milliseconds.
        double soLingerTimeSeconds = connectorConfig.soLingerTime();
        if (soLingerTimeSeconds == -1) {
            connector.setSoLingerTime(-1);
        } else {
            connector.setSoLingerTime((int)(soLingerTimeSeconds * 1000.0));
        }
        connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0));
        connector.setStopTimeout((long)(connectorConfig.stopTimeout() * 1000.0));
        return connector;
    }

    /** Builds the plain-HTTP connection factory; adds a secure-request customizer when SSL is on. */
    private HttpConnectionFactory newHttpConnectionFactory() {
        HttpConfiguration httpConfig = new HttpConfiguration();
        httpConfig.setSendDateHeader(true);
        httpConfig.setSendServerVersion(false);
        httpConfig.setSendXPoweredBy(false);
        httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize());
        httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize());
        httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize());
        httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize());
        if (connectorConfig.ssl().enabled()) {
            httpConfig.addCustomizer(new SecureRequestCustomizer());
        }
        return new HttpConnectionFactory(httpConfig);
    }

    /** Builds the TLS connection factory from the ssl section of the connector config. */
    private SslConnectionFactory newSslConnectionFactory() {
        Ssl sslConfig = connectorConfig.ssl();
        SslContextFactory factory = new SslContextFactory();
        switch (sslConfig.clientAuth()) {
            case NEED_AUTH:
                factory.setNeedClientAuth(true);
                break;
            case WANT_AUTH:
                factory.setWantClientAuth(true);
                break;
        }
        if (!sslConfig.prng().isEmpty()) {
            factory.setSecureRandomAlgorithm(sslConfig.prng());
        }
        // The config lists expose elements only by index, hence the manual copy into name arrays.
        if (!sslConfig.excludeProtocol().isEmpty()) {
            String[] prots = new String[sslConfig.excludeProtocol().size()];
            for (int i = 0; i < prots.length; i++) {
                prots[i] = sslConfig.excludeProtocol(i).name();
            }
            factory.setExcludeProtocols(prots);
        }
        if (!sslConfig.includeProtocol().isEmpty()) {
            String[] prots = new String[sslConfig.includeProtocol().size()];
            for (int i = 0; i < prots.length; i++) {
                prots[i] = sslConfig.includeProtocol(i).name();
            }
            factory.setIncludeProtocols(prots);
        }
        if (!sslConfig.excludeCipherSuite().isEmpty()) {
            String[] ciphs = new String[sslConfig.excludeCipherSuite().size()];
            for (int i = 0; i < ciphs.length; i++) {
                ciphs[i] = sslConfig.excludeCipherSuite(i).name();
            }
            factory.setExcludeCipherSuites(ciphs);
        }
        if (!sslConfig.includeCipherSuite().isEmpty()) {
            String[] ciphs = new String[sslConfig.includeCipherSuite().size()];
            for (int i = 0; i < ciphs.length; i++) {
                ciphs[i] = sslConfig.includeCipherSuite(i).name();
            }
            factory.setIncludeCipherSuites(ciphs);
        }
        // keyDbKey is the secret-store key; the actual password is fetched from the secret store below.
        String keyDbPassword = sslConfig.keyDbKey();
        switch (sslConfig.keyStoreType()) {
            case PEM:
                factory.setKeyStore(createPemKeyStore(sslConfig.pemKeyStore()));
                break;
            case JKS:
                factory.setKeyStorePath(sslConfig.keyStorePath());
                factory.setKeyStoreType(sslConfig.keyStoreType().toString());
                factory.setKeyStorePassword(secretStore.getSecret(keyDbPassword));
                break;
        }
        if (!sslConfig.trustStorePath().isEmpty()) {
            factory.setTrustStorePath(sslConfig.trustStorePath());
            factory.setTrustStoreType(sslConfig.trustStoreType().toString());
            if (sslConfig.useTrustStorePassword()) {
                factory.setTrustStorePassword(secretStore.getSecret(keyDbPassword));
            }
        }
        factory.setKeyManagerFactoryAlgorithm(sslConfig.sslKeyManagerFactoryAlgorithm());
        factory.setProtocol(sslConfig.protocol());
        return new SslConnectionFactory(factory, HttpVersion.HTTP_1_1.asString());
    }

    /** Loads a PEM certificate/key pair into a Java key store; wraps any failure with the paths involved. */
    private static KeyStore createPemKeyStore(PemKeyStore pemKeyStore) {
        try {
            Path certificatePath = Paths.get(pemKeyStore.certificatePath());
            Path keyPath = Paths.get(pemKeyStore.keyPath());
            return new PemSslKeyStore(certificatePath, keyPath)
                    .loadJavaKeyStore();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        } catch (Exception e) {
            throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e);
        }
    }

}
Inconsistent use of whitespace around '!'
/**
 * Validates that the SSL part of the connector config is internally consistent for the chosen key store type.
 *
 * @param config the connector config whose ssl() section is checked
 * @throws IllegalArgumentException if PEM attributes are combined with a JKS store, a required
 *         password or path is missing, or a JKS path is combined with a PEM store
 */
private static void validateSslConfig(ConnectorConfig config) {
    ConnectorConfig.Ssl ssl = config.ssl();
    if (ssl.keyStoreType() == JKS) {
        // PEM key store attributes are mutually exclusive with a JKS key store.
        if (!ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS.");
        }
        if (ssl.keyDbKey().isEmpty()) {
            throw new IllegalArgumentException("Missing password for JKS keystore");
        }
    }
    if (ssl.keyStoreType() == PEM) {
        if (! ssl.keyStorePath().isEmpty()) {
            throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM");
        }
        // Not fatal: for PEM key stores the password only applies to the trust store.
        if (!ssl.keyDbKey().isEmpty()) {
            log.warning("Encrypted PEM key stores are not supported. Password is only applied to truststore");
        }
        if (ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("Missing certificate path.");
        }
        if (ssl.pemKeyStore().keyPath().isEmpty()) {
            throw new IllegalArgumentException("Missing key path.");
        }
    }
    // A trust store that should use a password needs the password to actually be configured.
    if (!ssl.trustStorePath().isEmpty() && ssl.useTrustStorePassword() && ssl.keyDbKey().isEmpty()) {
        throw new IllegalArgumentException("Missing password for JKS truststore");
    }
}
if (!ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty()) {
/**
 * Validates that the SSL part of the connector config is internally consistent for the chosen key store type.
 * Called at construction time so that misconfiguration fails fast rather than at first connection.
 *
 * @param config the connector config whose ssl() section is checked
 * @throws IllegalArgumentException if PEM attributes are combined with a JKS store, a required password
 *         or path is missing, or a JKS path is combined with a PEM store
 */
private static void validateSslConfig(ConnectorConfig config) {
    ConnectorConfig.Ssl ssl = config.ssl();
    if (ssl.keyStoreType() == JKS) {
        // PEM key store attributes are mutually exclusive with a JKS key store.
        if (!ssl.pemKeyStore().keyPath().isEmpty() || !ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS.");
        }
        if (ssl.keyDbKey().isEmpty()) {
            throw new IllegalArgumentException("Missing password for JKS keystore");
        }
    }
    if (ssl.keyStoreType() == PEM) {
        if (!ssl.keyStorePath().isEmpty()) {
            throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM");
        }
        // Not fatal: for PEM key stores the password only applies to the trust store.
        if (!ssl.keyDbKey().isEmpty()) {
            log.warning("Encrypted PEM key stores are not supported. Password is only applied to truststore");
        }
        if (ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("Missing certificate path.");
        }
        if (ssl.pemKeyStore().keyPath().isEmpty()) {
            throw new IllegalArgumentException("Missing key path.");
        }
    }
    // A trust store that should use a password needs the password to actually be configured.
    if (!ssl.trustStorePath().isEmpty() && ssl.useTrustStorePassword() && ssl.keyDbKey().isEmpty()) {
        throw new IllegalArgumentException("Missing password for JKS truststore");
    }
}
/**
 * Builds Jetty {@link ServerConnector}s from a {@link ConnectorConfig}, including the TLS
 * connection factory when SSL is enabled. SSL settings are validated in the constructor.
 */
class ConnectorFactory {

    private final static Logger log = Logger.getLogger(ConnectorFactory.class.getName());

    private final ConnectorConfig connectorConfig;
    private final SecretStore secretStore;

    @Inject
    public ConnectorFactory(ConnectorConfig connectorConfig, SecretStore secretStore) {
        this.connectorConfig = connectorConfig;
        this.secretStore = secretStore;
        // Fail fast on inconsistent SSL settings instead of at first connection.
        if (connectorConfig.ssl().enabled()) validateSslConfig(connectorConfig);
    }

    public ConnectorConfig getConnectorConfig() {
        return connectorConfig;
    }

    /** Creates a configured server connector; the TLS factory is placed first when SSL is enabled. */
    public ServerConnector createConnector(final Metric metric, final Server server, final ServerSocketChannel ch) {
        ServerConnector connector;
        if (connectorConfig.ssl().enabled()) {
            connector = new JDiscServerConnector(connectorConfig, metric, server, ch,
                                                 newSslConnectionFactory(), newHttpConnectionFactory());
        } else {
            connector = new JDiscServerConnector(connectorConfig, metric, server, ch,
                                                 newHttpConnectionFactory());
        }
        connector.setPort(connectorConfig.listenPort());
        connector.setName(connectorConfig.name());
        connector.setAcceptQueueSize(connectorConfig.acceptQueueSize());
        connector.setReuseAddress(connectorConfig.reuseAddress());
        // -1 disables SO_LINGER; otherwise the config value (seconds) is converted to milliseconds.
        double soLingerTimeSeconds = connectorConfig.soLingerTime();
        if (soLingerTimeSeconds == -1) {
            connector.setSoLingerTime(-1);
        } else {
            connector.setSoLingerTime((int)(soLingerTimeSeconds * 1000.0));
        }
        connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0));
        connector.setStopTimeout((long)(connectorConfig.stopTimeout() * 1000.0));
        return connector;
    }

    /** Builds the plain-HTTP connection factory; adds a secure-request customizer when SSL is on. */
    private HttpConnectionFactory newHttpConnectionFactory() {
        HttpConfiguration httpConfig = new HttpConfiguration();
        httpConfig.setSendDateHeader(true);
        httpConfig.setSendServerVersion(false);
        httpConfig.setSendXPoweredBy(false);
        httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize());
        httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize());
        httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize());
        httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize());
        if (connectorConfig.ssl().enabled()) {
            httpConfig.addCustomizer(new SecureRequestCustomizer());
        }
        return new HttpConnectionFactory(httpConfig);
    }

    /** Builds the TLS connection factory from the ssl section of the connector config. */
    private SslConnectionFactory newSslConnectionFactory() {
        Ssl sslConfig = connectorConfig.ssl();
        SslContextFactory factory = new SslContextFactory();
        switch (sslConfig.clientAuth()) {
            case NEED_AUTH:
                factory.setNeedClientAuth(true);
                break;
            case WANT_AUTH:
                factory.setWantClientAuth(true);
                break;
        }
        if (!sslConfig.prng().isEmpty()) {
            factory.setSecureRandomAlgorithm(sslConfig.prng());
        }
        // The config lists expose elements only by index, hence the manual copy into name arrays.
        if (!sslConfig.excludeProtocol().isEmpty()) {
            String[] prots = new String[sslConfig.excludeProtocol().size()];
            for (int i = 0; i < prots.length; i++) {
                prots[i] = sslConfig.excludeProtocol(i).name();
            }
            factory.setExcludeProtocols(prots);
        }
        if (!sslConfig.includeProtocol().isEmpty()) {
            String[] prots = new String[sslConfig.includeProtocol().size()];
            for (int i = 0; i < prots.length; i++) {
                prots[i] = sslConfig.includeProtocol(i).name();
            }
            factory.setIncludeProtocols(prots);
        }
        if (!sslConfig.excludeCipherSuite().isEmpty()) {
            String[] ciphs = new String[sslConfig.excludeCipherSuite().size()];
            for (int i = 0; i < ciphs.length; i++) {
                ciphs[i] = sslConfig.excludeCipherSuite(i).name();
            }
            factory.setExcludeCipherSuites(ciphs);
        }
        if (!sslConfig.includeCipherSuite().isEmpty()) {
            String[] ciphs = new String[sslConfig.includeCipherSuite().size()];
            for (int i = 0; i < ciphs.length; i++) {
                ciphs[i] = sslConfig.includeCipherSuite(i).name();
            }
            factory.setIncludeCipherSuites(ciphs);
        }
        // keyDbKey is the secret-store key; the actual password is fetched from the secret store below.
        String keyDbPassword = sslConfig.keyDbKey();
        switch (sslConfig.keyStoreType()) {
            case PEM:
                factory.setKeyStore(createPemKeyStore(sslConfig.pemKeyStore()));
                break;
            case JKS:
                factory.setKeyStorePath(sslConfig.keyStorePath());
                factory.setKeyStoreType(sslConfig.keyStoreType().toString());
                factory.setKeyStorePassword(secretStore.getSecret(keyDbPassword));
                break;
        }
        if (!sslConfig.trustStorePath().isEmpty()) {
            factory.setTrustStorePath(sslConfig.trustStorePath());
            factory.setTrustStoreType(sslConfig.trustStoreType().toString());
            if (sslConfig.useTrustStorePassword()) {
                factory.setTrustStorePassword(secretStore.getSecret(keyDbPassword));
            }
        }
        factory.setKeyManagerFactoryAlgorithm(sslConfig.sslKeyManagerFactoryAlgorithm());
        factory.setProtocol(sslConfig.protocol());
        return new SslConnectionFactory(factory, HttpVersion.HTTP_1_1.asString());
    }

    /** Loads a PEM certificate/key pair into a Java key store; wraps any failure with the paths involved. */
    private static KeyStore createPemKeyStore(PemKeyStore pemKeyStore) {
        try {
            Path certificatePath = Paths.get(pemKeyStore.certificatePath());
            Path keyPath = Paths.get(pemKeyStore.keyPath());
            return new PemSslKeyStore(certificatePath, keyPath)
                    .loadJavaKeyStore();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        } catch (Exception e) {
            throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e);
        }
    }

}
class ConnectorFactory { private final static Logger log = Logger.getLogger(ConnectorFactory.class.getName()); private final ConnectorConfig connectorConfig; private final SecretStore secretStore; @Inject public ConnectorFactory(ConnectorConfig connectorConfig, SecretStore secretStore) { this.connectorConfig = connectorConfig; this.secretStore = secretStore; if (connectorConfig.ssl().enabled()) validateSslConfig(connectorConfig); } public ConnectorConfig getConnectorConfig() { return connectorConfig; } public ServerConnector createConnector(final Metric metric, final Server server, final ServerSocketChannel ch) { ServerConnector connector; if (connectorConfig.ssl().enabled()) { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newSslConnectionFactory(), newHttpConnectionFactory()); } else { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newHttpConnectionFactory()); } connector.setPort(connectorConfig.listenPort()); connector.setName(connectorConfig.name()); connector.setAcceptQueueSize(connectorConfig.acceptQueueSize()); connector.setReuseAddress(connectorConfig.reuseAddress()); double soLingerTimeSeconds = connectorConfig.soLingerTime(); if (soLingerTimeSeconds == -1) { connector.setSoLingerTime(-1); } else { connector.setSoLingerTime((int)(soLingerTimeSeconds * 1000.0)); } connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0)); connector.setStopTimeout((long)(connectorConfig.stopTimeout() * 1000.0)); return connector; } private HttpConnectionFactory newHttpConnectionFactory() { HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSendDateHeader(true); httpConfig.setSendServerVersion(false); httpConfig.setSendXPoweredBy(false); httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize()); httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize()); httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize()); 
httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize()); if (connectorConfig.ssl().enabled()) { httpConfig.addCustomizer(new SecureRequestCustomizer()); } return new HttpConnectionFactory(httpConfig); } private SslConnectionFactory newSslConnectionFactory() { Ssl sslConfig = connectorConfig.ssl(); SslContextFactory factory = new SslContextFactory(); switch (sslConfig.clientAuth()) { case NEED_AUTH: factory.setNeedClientAuth(true); break; case WANT_AUTH: factory.setWantClientAuth(true); break; } if (!sslConfig.prng().isEmpty()) { factory.setSecureRandomAlgorithm(sslConfig.prng()); } if (!sslConfig.excludeProtocol().isEmpty()) { String[] prots = new String[sslConfig.excludeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.excludeProtocol(i).name(); } factory.setExcludeProtocols(prots); } if (!sslConfig.includeProtocol().isEmpty()) { String[] prots = new String[sslConfig.includeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.includeProtocol(i).name(); } factory.setIncludeProtocols(prots); } if (!sslConfig.excludeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.excludeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.excludeCipherSuite(i).name(); } factory.setExcludeCipherSuites(ciphs); } if (!sslConfig.includeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.includeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.includeCipherSuite(i).name(); } factory.setIncludeCipherSuites(ciphs); } String keyDbPassword = sslConfig.keyDbKey(); switch (sslConfig.keyStoreType()) { case PEM: factory.setKeyStore(createPemKeyStore(sslConfig.pemKeyStore())); break; case JKS: factory.setKeyStorePath(sslConfig.keyStorePath()); factory.setKeyStoreType(sslConfig.keyStoreType().toString()); factory.setKeyStorePassword(secretStore.getSecret(keyDbPassword)); break; } if (!sslConfig.trustStorePath().isEmpty()) { 
factory.setTrustStorePath(sslConfig.trustStorePath()); factory.setTrustStoreType(sslConfig.trustStoreType().toString()); if (sslConfig.useTrustStorePassword()) { factory.setTrustStorePassword(secretStore.getSecret(keyDbPassword)); } } factory.setKeyManagerFactoryAlgorithm(sslConfig.sslKeyManagerFactoryAlgorithm()); factory.setProtocol(sslConfig.protocol()); return new SslConnectionFactory(factory, HttpVersion.HTTP_1_1.asString()); } private static KeyStore createPemKeyStore(PemKeyStore pemKeyStore) { try { Path certificatePath = Paths.get(pemKeyStore.certificatePath()); Path keyPath = Paths.get(pemKeyStore.keyPath()); return new PemSslKeyStore(certificatePath, keyPath) .loadJavaKeyStore(); } catch (IOException e) { throw new UncheckedIOException(e); } catch (Exception e) { throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e); } } }
New entry point.
/** Lists every tenant recursively: each array entry carries the tenant name plus its full serialized form. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime response = new Slime();
    Cursor tenants = response.setArray();
    for (Tenant tenant : controller.tenants().asList()) {
        Cursor entry = tenants.addObject();
        entry.setString("tenant", tenant.getId().id());
        toSlime(entry, tenant, request, true);
    }
    return new SlimeJsonResponse(response);
}
}
/** Lists every tenant recursively, serializing each one fully into the response array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime response = new Slime();
    Cursor tenants = response.setArray();
    for (Tenant tenant : controller.tenants().asList()) {
        toSlime(tenants.addObject(), tenant, request, true);
    }
    return new SlimeJsonResponse(response);
}
// Handler for the /application/v4 REST API: dispatches by HTTP method, then by path.
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;
    private final AthenzClientFactory athenzClientFactory;

    @Inject
    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer, AthenzClientFactory athenzClientFactory) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
        this.athenzClientFactory = athenzClientFactory;
    }

    // Deploy/convergence requests can be slow, so allow long-running requests.
    @Override
    public Duration getTimeout() { return Duration.ofMinutes(20); }

    // Top-level dispatch; maps known exception types to matching HTTP error responses.
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); }
        catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); }
        catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); }
        catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); }
        catch (ConfigServerException e) { return ErrorResponse.from(e); }
        catch (RuntimeException e) {
            // Unexpected: log with stack trace, return only the message chain to the client.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    // GET routes, most-specific paths last.
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // PUT routes.
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // POST routes.
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // DELETE routes.
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // CORS-style capability advertisement.
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    // Root resource: full recursive dump, or a plain listing of sub-resources.
    private HttpResponse root(HttpRequest request) {
        return request.getBooleanProperty("recursive")
                ? recursiveRoot(request)
                : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    // Returns the calling user plus the tenants they belong to; "userOverride" bypasses auth lookup.
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        // True when the user's own (user-) tenant exists among the returned tenants.
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    // Shallow list of all tenants.
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant
        tenant : controller.tenants().asList())  // continues the tenants() loop begun above
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            if ( ! application.deploymentJobs().projectId().isPresent()) continue;  // no CI project registered
            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        // Always present, always empty — kept for response-schema compatibility.
        response.setArray("brokenTenantPipelines");
        return new SlimeJsonResponse(slime);
    }

    // Athenz domains matching an optional "prefix" query parameter.
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.id());
        }
        return new SlimeJsonResponse(slime);
    }

    // All known property-id → property-name pairs.
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    // Tells the caller to refresh its auth cookie unless the bouncer pass-through header says it is fine.
    private HttpResponse cookieFreshness(HttpRequest request) {
        Slime slime = new Slime();
        String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD);
        slime.setObject().setBool("shouldRefreshCookie", ! SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader));
        return new SlimeJsonResponse(slime);
    }

    // Single tenant by name, with its applications listed.
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(new TenantId((tenantName)))
                .map(tenant -> tenant(tenant, request, true))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    // All applications under one tenant.
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    // Single application ("default" instance) with full detail.
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        Application application = controller.applications().get(applicationId)
                .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
        Slime slime = new Slime();
        toSlime(slime.setObject(), application, request);
        return new SlimeJsonResponse(slime);
    }

    // Renders one application: in-flight change, job statuses, rotations, deployments, metrics.
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        if (application.deploying().isPresent()) {
            Cursor deployingObject = object.setObject("deploying");
            // The change is either a platform version change or an application revision change.
            if (application.deploying().get() instanceof Change.VersionChange)
                deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString());
            else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent())
                toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision"));
        }
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                .deploymentOrder()
                .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        object.setString("compileVersion", application.compileVersion(controller).toFullString());
        Cursor globalRotationsArray = object.setArray("globalRotations");
        Set<URI> rotations = controller.getRotationUris(application.id());
        // One health lookup serves all rotations; the service keys results per endpoint.
        Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty()
                ? Collections.emptyMap()
                : controller.getHealthStatus(rotations.iterator().next().getHost());
        for (URI rotation : rotations)
            globalRotationsArray.addString(rotation.toString());
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                .deploymentOrder()
                .sortBy(application.deploymentSpec().zones(), application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            if ( ! rotations.isEmpty())
                setRotationStatus(deployment, rotationHealthStatus, deploymentObject);
            // recursive=true inlines each deployment; otherwise only a URL to it is emitted.
            if (request.getBooleanProperty("recursive"))
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString());
        }
        try {
            MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id());
            Cursor metricsObject = object.setObject("metrics");
            metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality());
            metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality());
        } catch (RuntimeException e) {
            // Metrics are best-effort; the rest of the response still renders.
            // NOTE(review): this log overload takes a message parameter, not a Throwable —
            // the string likely never shows; consider log.log(Level, String, Throwable).
            log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e));
        }
    }

    // One deployment of one application instance in one zone.
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().get(id)
                .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region)));
        Deployment deployment = application.deployments().get(deploymentId.zone());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zone());
        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    // Renders one deployment: endpoints, links, version/revision, TTL, cost, metrics.
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId);
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        if (deploymentEndpoints.isPresent()) {
            for (URI uri : deploymentEndpoints.get().getContainerEndpoints())
                serviceUrlArray.addString(uri.toString());
        }
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" + deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        URI elkUrl = controller.getElkUri(deploymentId);
        if (elkUrl != null)
            response.setString("elkUrl", elkUrl.toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.revision().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Zones with a TTL get an expiry timestamp relative to deploy time.
        Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region());
        deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli()));
        controller.applications().get(deploymentId.applicationId()).flatMap(application -> application.deploymentJobs().projectId())
                .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.revision().source(), response);
        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis",
        metrics.queryLatencyMillis());  // continues the metrics block begun above
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    }

    // Renders an application revision: hash plus its source revision, when known.
    private void toSlime(ApplicationRevision revision, Cursor object) {
        object.setString("hash", revision.id());
        if (revision.source().isPresent())
            sourceRevisionToSlime(revision.source(), object.setObject("source"));
    }

    // No-op when the source revision is absent.
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId());
    }

    // Takes a deployment's rotations in or out of service; requires tenant authorization and a "reason".
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if (!existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = authorizer.getUserId(request).toString();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp);
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region)));
        try {
            List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus);
            return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? "in" : "out of"));
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage());
        }
    }

    // Reports the current override status of every rotation for a deployment.
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region)));
        Slime slime = new Slime();
        Cursor c1 = slime.setObject().setArray("globalrotationoverride");
        try {
            Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId);
            // Emits rotation name and its status object as alternating array entries.
            for (String rotation : rotations.keySet()) {
                EndpointStatus currentStatus = rotations.get(rotation);
                c1.addString(rotation);
                Cursor c2 = c1.addObject();
                c2.setString("status", currentStatus.getStatus().name());
                c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
                c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
                c2.setLong("timestamp", currentStatus.getEpoch());
            }
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage());
        }
        return new SlimeJsonResponse(slime);
    }

    // BCP health of the global rotation endpoint matching this environment/region.
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Set<URI> rotations = controller.getRotationUris(applicationId);
        if (rotations.isEmpty())
            throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'");
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost());
        for (String rotationEndpoint : rotationHealthStatus.keySet()) {
            // Endpoint names embed the zone as DNS labels; match on both environment and region.
            if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) {
                Cursor bcpStatusObject = response.setObject("bcpStatus");
                bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    // Blocks (up to "timeout" ms, default 1000) until config has converged in the zone.
    private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000)));
    }

    // Service view of one deployment.
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri());
        response.setResponse(applicationView);
        return response;
    }

    // Proxies a single service's sub-resource through the config server.
    private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
        Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
        ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri());
        response.setResponse(result, serviceName, restPath);
        return response;
    }

    // Creates the authenticated user's personal tenant; idempotent if it already exists.
    private HttpResponse createUser(HttpRequest request) {
        Optional<String> username = userFrom(request);
        if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated.");
        try {
            controller.tenants().createUserTenant(username.get());
            return new MessageResponse("Created user '" + username.get() + "'");
        } catch (AlreadyExistsException e) {
            // NOTE(review): this interpolates the Optional itself ("Optional[foo]") —
            // presumably username.get() was intended.
            return new MessageResponse("User '" + username + "' already exists");
        }
    }

    // Updates tenant metadata; the allowed fields depend on the tenant type.
    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( ! existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");;  // NOTE(review): stray second ';' (harmless empty statement)
        Inspector requestData = toSlime(request.getData()).get();
        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);
        Tenant updatedTenant;
        switch (existingTenant.get().tenantType()) {
            case USER: {
                throw new BadRequestException("Cannot set property or OpsDB user group for user tenant");
            }
            case OPSDB: {
                UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString());
                updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new));
                throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request);
                controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request));
                break;
            }
            case ATHENS: {
                if (requestData.field("userGroup").valid())
                    throw new BadRequestException("Cannot set OpsDB user group to Athens tenant");
                updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new));
                controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request));
                break;
            }
            default: {
                throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType());
            }
        }
        return tenant(updatedTenant, request, true);
    }

    // Creates a non-user tenant; type-specific authorization happens below.
    private HttpResponse createTenant(String tenantName, HttpRequest request) {
        if (new TenantId(tenantName).isUser())
            return ErrorResponse.badRequest("Use User API to create user tenants.");
        Inspector requestData = toSlime(request.getData()).get();
        Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId",
requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return tenant(tenant, request, true); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthenzDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, 
on a given version */
private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try (Lock lock = controller.applications().lock(id)) {
        Application application = controller.applications().require(id);

        // Refuse to queue a second change while one is still rolling out
        if (application.deploying().isPresent())
            throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " +
                                               application.deploying().get() + " is in progress");

        Version version = decideDeployVersion(request);
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions());

        controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version));
        return new MessageResponse("Triggered deployment of " + application + " on version " + version);
    }
}

/** Cancel any ongoing change for given application */
private HttpResponse cancelDeploy(String tenantName, String applicationName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try (Lock lock = controller.applications().lock(id)) {
        Application application = controller.applications().require(id);
        Optional<Change> change = application.deploying();
        if ( ! change.isPresent())
            return new MessageResponse("No deployment in progress for " + application + " at this time");

        controller.applications().deploymentTrigger().cancelChange(id);
        return new MessageResponse("Cancelled " + change.get() + " for " + application);
    }
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 new Zone(Environment.from(environment), RegionName.from(region)));
    // A "hostname" request property narrows the restart to that single node
    if (request.getProperty("hostname") != null)
        controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname")));
    else
        controller.applications().restart(deploymentId);
    return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                             ApplicationResource.API_PATH, applicationName,
                                                             EnvironmentResource.API_PATH, environment,
                                                             "region", region,
                                                             "instance", instanceName));
}

/**
 * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that
 * the application is working. It is called for all production zones, also those in which the application is not present,
 * and possibly before it is present, so failures are normal and expected.
 */
private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) {
    try {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        return new JacksonJsonResponse(controller.grabLog(deploymentId));
    }
    catch (RuntimeException e) {
        // Absence of the deployment (or of logs) is expected here; answer with an empty object, not an error
        Slime slime = new Slime();
        slime.setObject();
        return new SlimeJsonResponse(slime);
    }
}

private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    if ( !
dataParts.containsKey("applicationZip"))
        return ErrorResponse.badRequest("Missing required form part 'applicationZip'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // Authorize against the owning tenant before touching the zone
    DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory);
    Tenant tenant = controller.tenants().tenant(new TenantId(tenantName))
                              .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
    Principal principal = authorizer.getPrincipal(request);
    deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId);

    DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")),
                                                             optional("vespaVersion", deployOptions).map(Version::new),
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    ActivateResult result = controller.applications().deployApplication(applicationId,
                                                                        zone,
                                                                        new ApplicationPackage(dataParts.get("applicationZip")),
                                                                        deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length));
}

private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName));
    if ( !
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } tenant.getPropertyId().ifPresent(propertyId -> { try { object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString()); object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString()); object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString()); Cursor lists = object.setArray("contacts"); for (List<? 
extends User> contactList : controller.organization().contactsFor(propertyId)) { Cursor list = lists.addArray(); for (User contact : contactList) list.addString(contact.displayName()); } } catch (RuntimeException e) { log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e); } }); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! 
deployment.zone().environment().equals(Environment.prod)) return;
    Cursor bcpStatusObject = object.setObject("bcpStatus");
    bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
}

/** Returns the status of the endpoint whose name matches this deployment's environment and region, or UNKNOWN */
private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
    for (String endpoint : healthStatus.keySet()) {
        if (endpoint.contains(toDns(deployment.zone().environment().value())) &&
            endpoint.contains(toDns(deployment.zone().region().value()))) {
            return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
        }
    }
    return RotationStatus.UNKNOWN;
}

/** Converts an identifier to its DNS form by replacing underscores with dashes */
private String toDns(String id) { return id.replace('_', '-'); }

/**
 * Parses the given string as a long.
 *
 * @param valueOrNull the string to parse, or null to get the default
 * @param defaultWhenNull returned when valueOrNull is null
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}

/** Renders one job run (id, version, optional revision, reason, timestamp) into the given slime object */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.version().toFullString());
    jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}

/** Reads the given stream (at most 1 MB) as JSON into a Slime structure */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Bug fix: propagate the cause instead of throwing a bare RuntimeException which discarded it
        throw new RuntimeException(e);
    }
}

private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
    UserId userId = authorizer.getUserId(request);
    if ( ! authorizer.isSuperUser(request) && ! authorizer.isGroupMember(userId, userGroup)) {
        throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'",
                                                   userId.id(), userGroup.id()));
    }
}

private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest
request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); if (request.getBooleanProperty("recursive")) toSlime(object, application, request); else object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor 
restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty();
    Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
    return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null),
                                               gitRevisionFromSlime(object.field("gitRevision"))));
}

private GitRevision gitRevisionFromSlime(Inspector object) {
    return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                           optional("branch", object).map(GitBranch::new).orElse(null),
                           optional("commit", object).map(GitCommit::new).orElse(null));
}

/** Reads the entire stream as a string, or returns null if the stream is empty */
private String readToString(InputStream stream) {
    // Bug fix: close the scanner (and thus the fully-consumed stream) instead of leaking it.
    // "\\A" makes the scanner return the whole remaining input as one token.
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }
}

/** Returns whether the given version is among the versions currently known to this system */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}

/** Returns the version given in the request body, or the current system version if none is given */
private Version decideDeployVersion(HttpRequest request) {
    String requestVersion = readToString(request.getData());
    if (requestVersion != null)
        return new Version(requestVersion);
    else
        return controller.systemVersion();
}

public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long)deploymentCost.getTco());
    object.setLong("waste", (long)deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clustersObject = object.setObject("cluster");
    for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
        toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}

private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    // Bug fix: cast to long, not int, matching toSlime(DeploymentCost) above — an int cast
    // silently overflows for tco/waste values above Integer.MAX_VALUE
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());

    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());

    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());

    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}

/** Returns the name of the most utilized resource ("cpu", "mem", "disk" or "diskbusy") of the cluster */
private static String getResourceName(ClusterUtilization utilization) {
    String name = "cpu";
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) {
        name = "mem";
    } else if (utilization.getDisk() == max) {
        name = "disk";
    } else if (utilization.getDiskBusy() == max) {
        name = "diskbusy";
    }
    return name;
}

}
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final Authorizer authorizer; private final AthenzClientFactory athenzClientFactory; @Inject public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer, AthenzClientFactory athenzClientFactory) { super(executor, accessLog); this.controller = controller; this.authorizer = authorizer; this.athenzClientFactory = athenzClientFactory; } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return 
tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return 
rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse root(HttpRequest request) { return request.getBooleanProperty("recursive") ? recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = userFrom(request) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant 
tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            // Applications without a registered build project have no pipeline to report
            if ( ! application.deploymentJobs().projectId().isPresent()) continue;

            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        response.setArray("brokenTenantPipelines"); // currently always empty; kept for response compatibility
        return new SlimeJsonResponse(slime);
    }

    /** Lists the ids of all Athenz domains matching the (optional) 'prefix' request property. */
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known properties as (propertyid, property) pairs. */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Tells the caller whether its cookie should be refreshed, judged from the bouncer passthru header. */
    private HttpResponse cookieFreshness(HttpRequest request) {
        Slime slime = new Slime();
        String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD);
        slime.setObject().setBool("shouldRefreshCookie",
                                  ! SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader));
        return new SlimeJsonResponse(slime);
    }

    /** Returns the named tenant, or 404 when it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(new TenantId((tenantName)))
                         .map(tenant -> tenant(tenant, request, true))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    /** Lists all applications of the given tenant. */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the default instance of the given application, or 404. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        Application application = controller.applications().get(applicationId)
                .orElseThrow(() -> new NotExistsException(applicationId + " not found"));

        Slime slime = new Slime();
        toSlime(slime.setObject(), application, request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders the full application view: change in progress, job status, rotations, deployments and metrics. */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        if (application.deploying().isPresent()) {
            Cursor deployingObject = object.setObject("deploying");
            if (application.deploying().get() instanceof Change.VersionChange)
                deployingObject.setString("version",
                                          ((Change.VersionChange)application.deploying().get()).version().toString());
            else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent())
                toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(),
                        deployingObject.setObject("revision"));
        }

        // Jobs, in deployment order
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                .deploymentOrder()
                .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }

        object.setString("compileVersion", application.compileVersion(controller).toFullString());

        // Global rotations: health is fetched once, for the host of the first rotation
        Cursor globalRotationsArray = object.setArray("globalRotations");
        Set<URI> rotations = controller.getRotationUris(application.id());
        Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty()
                ? Collections.emptyMap()
                : controller.getHealthStatus(rotations.iterator().next().getHost());
        for (URI rotation : rotations)
            globalRotationsArray.addString(rotation.toString());

        // Deployments, in deployment order
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                .deploymentOrder()
                .sortBy(application.deploymentSpec().zones(), application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            if ( ! rotations.isEmpty())
                setRotationStatus(deployment, rotationHealthStatus, deploymentObject);

            if (request.getBooleanProperty("recursive")) // expand deployment inline instead of linking to it
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value() +
                                                           "/instance/" + application.id().instance().value(),
                                                           request.getUri()).toString());
        }

        // Metrics are best effort: log and continue if the metrics service fails
        try {
            MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id());
            Cursor metricsObject = object.setObject("metrics");
            metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality());
            metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality());
        }
        catch (RuntimeException e) {
            // Concatenate the detail into the message: the log(Level, String, Object) overload treats the
            // third argument as a {0} message parameter, and this message has no placeholder, so the
            // exception detail was silently dropped before.
            log.log(Level.WARNING, "Failed getting Yamas metrics: " + Exceptions.toMessageString(e));
        }
    }

    /** Returns the deployment of the given instance in the given zone, or 404. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName,
                                    String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application =
controller.applications().get(id)
                .orElseThrow(() -> new NotExistsException(id + " not found"));

        DeploymentId deploymentId = new DeploymentId(application.id(),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        Deployment deployment = application.deployments().get(deploymentId.zone());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zone());

        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders one deployment: service urls, links, version and revision, cost and metrics. */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId);
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        if (deploymentEndpoints.isPresent()) {
            for (URI uri : deploymentEndpoints.get().getContainerEndpoints())
                serviceUrlArray.addString(uri.toString());
        }

        // Link into the node repository for the nodes of this deployment
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" +
                                             deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" +
                                             deploymentId.applicationId().tenant() + "." +
                                             deploymentId.applicationId().application() + "." +
                                             deploymentId.applicationId().instance(),
                                             request.getUri()).toString());

        URI elkUrl = controller.getElkUri(deploymentId);
        if (elkUrl != null) // logs link is only present when an ELK uri is configured for the zone
            response.setString("elkUrl", elkUrl.toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.revision().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Expiry is only present in zones which have a deployment time-to-live configured
        Optional<Duration> deploymentTimeToLive = controller.zoneRegistry()
                .getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region());
        deploymentTimeToLive.ifPresent(duration ->
                response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli()));

        controller.applications().get(deploymentId.applicationId())
                  .flatMap(application -> application.deploymentJobs().projectId())
                  .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.revision().source(), response);

        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    }

    /** Renders a revision: its hash, plus its source git coordinates when known. */
    private void toSlime(ApplicationRevision revision, Cursor object) {
        object.setString("hash", revision.id());
        if (revision.source().isPresent())
            sourceRevisionToSlime(revision.source(), object.setObject("source"));
    }

    /** Adds gitRepository/gitBranch/gitCommit to the given object when the source revision is known; otherwise no-op. */
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(),
                                                                deploymentId.zone().region(),
                                                                deploymentId.applicationId());
    }

    /** Takes the global rotation of a deployment in or out of service. Requires tenant authorization. */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                                   String environment, String region, boolean inService, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if (!existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);

        // Record who changed the status, why, and when
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = authorizer.getUserId(request).toString();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp);
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        try {
            List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus);
            return new MessageResponse(String.format("Rotations %s successfully set to %s service",
                                                     rotations.toString(), inService ? "in" : "out of"));
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage());
        }
    }

    /** Returns the current rotation override status for each rotation of the given deployment. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                                   String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        Slime slime = new Slime();
        Cursor c1 = slime.setObject().setArray("globalrotationoverride");
        try {
            Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId);
            for (String rotation : rotations.keySet()) {
                EndpointStatus currentStatus = rotations.get(rotation);
                // The array alternates between the rotation name and its status object
                c1.addString(rotation);
                Cursor c2 = c1.addObject();
                c2.setString("status", currentStatus.getStatus().name());
                c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
                c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
                c2.setLong("timestamp", currentStatus.getEpoch());
            }
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the health status of the global rotation endpoint matching the given environment and region. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName,
                                        String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Set<URI> rotations = controller.getRotationUris(applicationId);
        if (rotations.isEmpty())
            throw new NotExistsException("global rotation does not exist for '" + environment + "."
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, 
serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return tenant(updatedTenant, request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId", 
requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return tenant(tenant, request, true); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthenzDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, 
on a given version */
    private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) { // hold the application lock while triggering
            Application application = controller.applications().require(id);
            if (application.deploying().isPresent())
                throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " +
                                                   application.deploying().get() + " is in progress");

            Version version = decideDeployVersion(request);
            if ( ! systemHasVersion(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + controller.versionStatus().versions());
            controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version));
            return new MessageResponse("Triggered deployment of " + application + " on version " + version);
        }
    }

    /** Cancel any ongoing change for given application */
    private HttpResponse cancelDeploy(String tenantName, String applicationName) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) {
            Application application = controller.applications().require(id);
            Optional<Change> change = application.deploying();
            if (!change.isPresent()) {
                return new MessageResponse("No deployment in progress for " + application + " at this time");
            }
            controller.applications().deploymentTrigger().cancelChange(id);
            return new MessageResponse("Cancelled " + change.get() + " for " + application);
        }
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName,
                                 String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        // A 'hostname' request property restricts the restart to that single host
        if (request.getProperty("hostname") != null)
            controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname")));
        else
            controller.applications().restart(deploymentId);
        return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                                 ApplicationResource.API_PATH, applicationName,
                                                                 EnvironmentResource.API_PATH, environment,
                                                                 "region", region,
                                                                 "instance", instanceName));
    }

    /**
     * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that
     * the application is working. It is called for all production zones, also those in which the application is not present,
     * and possibly before it is present, so failures are normal and expected.
     */
    private HttpResponse log(String tenantName, String applicationName, String instanceName,
                             String environment, String region) {
        try {
            DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                         new Zone(Environment.from(environment), RegionName.from(region)));
            return new JacksonJsonResponse(controller.grabLog(deploymentId));
        }
        catch (RuntimeException e) {
            // Failures are expected here (see javadoc), so answer with an empty object rather than an error
            Slime slime = new Slime();
            slime.setObject();
            return new SlimeJsonResponse(slime);
        }
    }

    /** Deploys an application package (multipart form: 'deployOptions' + 'applicationZip') to the given zone. */
    private HttpResponse deploy(String tenantName, String applicationName, String instanceName,
                                String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));

        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        if ( ! dataParts.containsKey("applicationZip"))
            return ErrorResponse.badRequest("Missing required form part 'applicationZip'");

        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        // Verify that the principal is allowed to deploy to this environment for this application
        DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory);
        Tenant tenant = controller.tenants().tenant(new TenantId(tenantName))
                .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
        Principal principal = authorizer.getPrincipal(request);
        deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId);

        DeployOptions deployOptionsJsonClass =
                new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")),
                                  optional("vespaVersion", deployOptions).map(Version::new),
                                  deployOptions.field("ignoreValidationErrors").asBool(),
                                  deployOptions.field("deployCurrentVersion").asBool());
        ActivateResult result = controller.applications().deployApplication(applicationId,
                                                                            zone,
                                                                            new ApplicationPackage(dataParts.get("applicationZip")),
                                                                            deployOptionsJsonClass);
        return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length));
    }

    /** Deletes a tenant. Requires tenant authorization. Responds with the deleted tenant's data. */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( ! tenant.isPresent())
            return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
        controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request));
        // Render without the application list: the tenant is already gone
        return tenant(tenant.get(), request, false);
    }

    /** Deletes the default instance of the given application. Requires tenant authorization. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);

        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request));
        if (deleted == null)
            return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found");
        return new EmptyJsonResponse();
    }

    /** Deactivates the deployment of the given instance in the given zone. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName,
                                    String environment, String region) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));

        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
        Deployment deployment = application.deployments().get(zone);

        if (deployment == null) {
            // No deployment is known here, but attempt to deactivate the zone anyway
            controller.applications().deactivate(application, zone);
        } else {
            controller.applications().deactivate(application, deployment, false);
        }
        return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                        ApplicationResource.API_PATH, applicationName,
                                                        EnvironmentResource.API_PATH, environment,
                                                        "region", region,
                                                        "instance", instanceName));
    }

    /**
     * Promote application Chef environments.
To be used by component jobs only */
    private HttpResponse promoteApplication(String tenantName, String applicationName) {
        try{
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.systemChefEnvironment();
            String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName),
                                                                                    ApplicationName.from(applicationName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s",
                                                     sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)",
                                                  tenantName, applicationName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /**
     * Promote application Chef environments for jobs that deploy applications
     */
    private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName,
                                                      String environmentName, String regionName) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName),
                                                                                    ApplicationName.from(applicationName));
            String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName),
                                                                                    ApplicationName.from(applicationName),
                                                                                    Environment.from(environmentName),
                                                                                    RegionName.from(regionName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s",
                                                     sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)",
                                                  tenantName, applicationName, environmentName, regionName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /** Returns the name of the authenticated user, if any. */
    private Optional<String> userFrom(HttpRequest request) {
        return authorizer.getPrincipalIfAny(request).map(Principal::getName);
    }

    /** Renders a tenant, optionally with its applications, plus organization links when a property id is set. */
    private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
        object.setString("tenant", tenant.getId().id());
        object.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> object.setString("property", p.id()));
        tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString()));
        tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id()));
        Cursor applicationArray = object.setArray("applications");
        if (listApplications) {
            for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) {
                if (application.id().instance().isDefault()) { // only list default instances
                    if (request.getBooleanProperty("recursive"))
                        toSlime(applicationArray.addObject(), application, request);
                    else
                        toSlime(application, applicationArray.addObject(), request);
                }
            }
        }
        // Organization links are best effort: log and continue if lookup fails
        tenant.getPropertyId().ifPresent(propertyId -> {
            try {
                object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString());
                object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString());
                object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString());
                Cursor lists = object.setArray("contacts");
                for (List<? extends User> contactList : controller.organization().contactsFor(propertyId)) {
                    Cursor list = lists.addArray();
                    for (User contact : contactList)
                        list.addString(contact.displayName());
                }
            }
            catch (RuntimeException e) {
                log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e);
            }
        });
    }

    /** Renders the short (list) form of a tenant: its name, metadata and a url to the full resource. */
    private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
        object.setString("tenant", tenant.getId().id());
        Cursor metaData = object.setObject("metaData");
        metaData.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id()));
        tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id()));
        object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString());
    }

    /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
    private URI withPath(String newPath, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
        }
        catch (URISyntaxException e) {
            throw new RuntimeException("Will not happen", e);
        }
    }

    /** Adds a bcpStatus object with the matching rotation endpoint's health; only done for prod deployments. */
    private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) {
        if ( ! deployment.zone().environment().equals(Environment.prod)) return;

        Cursor bcpStatusObject = object.setObject("bcpStatus");
        bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
    }

    /** Finds the health of the endpoint matching the deployment's environment and region, or UNKNOWN. */
    private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
        for (String endpoint : healthStatus.keySet()) {
            if (endpoint.contains(toDns(deployment.zone().environment().value()))
                && endpoint.contains(toDns(deployment.zone().region().value()))) {
                return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
            }
        }
        return RotationStatus.UNKNOWN;
    }

    /** Converts an id to its DNS form by replacing underscores with dashes. */
    private String toDns(String id) {
        return id.replace('_', '-');
    }

    /** Parses the given string as a long, returning the default when it is null. */
    private long asLong(String valueOrNull, long defaultWhenNull) {
        if (valueOrNull == null) return defaultWhenNull;
        try {
            return Long.parseLong(valueOrNull);
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
        }
    }

    private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
        object.setLong("id", jobRun.id());
        object.setString("version", jobRun.version().toFullString());
        jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
        object.setString("reason", jobRun.reason());
        object.setLong("at", jobRun.at().toEpochMilli());
    }

    /** Reads the given stream (at most 1 MB) as json into a Slime object. */
    private Slime toSlime(InputStream jsonStream) {
        try {
            byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
            return SlimeUtils.jsonToSlime(jsonBytes);
        } catch (IOException e) {
            // Chain the cause: the original threw a bare RuntimeException, discarding all diagnostics
            throw new RuntimeException(e);
        }
    }

    private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
        UserId userId = authorizer.getUserId(request);
        if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup)) {
            throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'",
                                                       userId.id(), userGroup.id()));
        }
    }

    private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest
request) {
        UserId userId = authorizer.getUserId(request);
        if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) {
            throw new ForbiddenException(
                    String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id()));
        }
    }

    /** Returns the named field, or throws IllegalArgumentException when it is missing. */
    private Inspector mandatory(String key, Inspector object) {
        if ( ! object.field(key).valid())
            throw new IllegalArgumentException("'" + key + "' is missing");
        return object.field(key);
    }

    /** Returns the named string field, or empty when it is missing. */
    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }

    /** Joins the given path elements with '/'. */
    private static String path(Object... elements) {
        return Joiner.on("/").join(elements);
    }

    /** Renders the short (list) form of an application: its ids and a url to the full resource. */
    private void toSlime(Application application, Cursor object, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
                                         "/application/" + application.id().application().value(),
                                         request.getUri()).toString());
    }

    /** Renders the result of a deploy: revision id, prepare log, and required config change actions. */
    private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) {
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        object.setString("revisionId", result.getRevisionId().id());
        object.setLong("applicationZipSize", applicationZipSizeBytes);

        Cursor logArray = object.setArray("prepareMessages");
        if (result.getPrepareResponse().log != null) { // log may be absent from the prepare response
            for (Log logMessage : result.getPrepareResponse().log) {
                Cursor logObject = logArray.addObject();
                logObject.setLong("time", logMessage.time);
                logObject.setString("level", logMessage.level);
                logObject.setString("message", logMessage.message);
            }
        }

        Cursor changeObject = object.setObject("configChangeActions");

        Cursor restartActionsArray = changeObject.setArray("restart");
        for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) {
            Cursor restartActionObject = restartActionsArray.addObject();
            restartActionObject.setString("clusterName", restartAction.clusterName);
            restartActionObject.setString("clusterType", restartAction.clusterType);
            restartActionObject.setString("serviceType", restartAction.serviceType);
            serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
            stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
        }

        Cursor refeedActionsArray = changeObject.setArray("refeed");
        for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) {
            Cursor refeedActionObject = refeedActionsArray.addObject();
            refeedActionObject.setString("name", refeedAction.name);
            refeedActionObject.setBool("allowed", refeedAction.allowed);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }

    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
        for (ServiceInfo serviceInfo : serviceInfoList) {
            Cursor serviceInfoObject = array.addObject();
            serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
            serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
            serviceInfoObject.setString("configId", serviceInfo.configId);
            serviceInfoObject.setString("hostName", serviceInfo.hostName);
        }
    }

    private void stringsToSlime(List<String> strings, Cursor array) {
        for (String string : strings)
            array.addString(string);
    }

    /** Reads an optional screwdriver build job from the given json object; empty when the field is absent. */
    private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) {
        if ( ! object.valid() ) return Optional.empty();
        Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
        return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null),
                                                   gitRevisionFromSlime(object.field("gitRevision"))));
    }

    private GitRevision gitRevisionFromSlime(Inspector object) {
        return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                               optional("branch", object).map(GitBranch::new).orElse(null),
                               optional("commit", object).map(GitCommit::new).orElse(null));
    }

    /**
     * Reads the whole stream as a single string, or returns null when the stream is empty.
     * NOTE(review): the Scanner uses the platform default charset and is never closed — presumably
     * acceptable for in-memory request bodies, but confirm whether explicit UTF-8 is required here.
     */
    private String readToString(InputStream stream) {
        Scanner scanner = new Scanner(stream).useDelimiter("\\A");
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }

    /** Returns whether the given version is among this system's active versions. */
    private boolean systemHasVersion(Version version) {
        return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
    }

    /** Returns the version given in the request body, or the current system version when none is given. */
    private Version decideDeployVersion(HttpRequest request) {
        String requestVersion = readToString(request.getData());
        if (requestVersion != null)
            return new Version(requestVersion);
        else
            return controller.systemVersion();
    }

    /** Renders a deployment cost: totals plus a per-cluster breakdown. */
    public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
        object.setLong("tco", (long)deploymentCost.getTco());
        object.setLong("waste", (long)deploymentCost.getWaste());
        object.setDouble("utilization", deploymentCost.getUtilization());
        Cursor clustersObject = object.setObject("cluster");
        for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
            toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
    }

    private static void toSlime(ClusterCost clusterCost, Cursor object) {
        object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
        object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
        object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
        object.setLong("tco", (int)clusterCost.getTco());
        object.setLong("waste",
(int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } }
Lists the root resources of this API; when the request has `recursive=true`, instead returns the full recursive listing of every tenant.
/**
 * Serves the API root: a plain resource listing by default, or the full
 * recursive tenant listing when the request carries {@code recursive=true}.
 */
private HttpResponse root(HttpRequest request) {
    if (request.getBooleanProperty("recursive"))
        return recursiveRoot(request);
    return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain",
                                "property", "cookiefreshness");
}
}
/**
 * Root handler for this API. Returns the recursive tenant dump when the
 * {@code recursive} boolean property is set, otherwise the flat list of
 * top-level resource names.
 */
private HttpResponse root(HttpRequest request) {
    boolean recursive = request.getBooleanProperty("recursive");
    return recursive
            ? recursiveRoot(request)
            : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain",
                                   "property", "cookiefreshness");
}
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final Authorizer authorizer; private final AthenzClientFactory athenzClientFactory; @Inject public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer, AthenzClientFactory athenzClientFactory) { super(executor, accessLog); this.controller = controller; this.authorizer = authorizer; this.athenzClientFactory = athenzClientFactory; } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return 
tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return 
rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) { Cursor tenantObject = tenantArray.addObject(); tenantObject.setString("tenant", tenant.getId().id()); toSlime(tenantObject, tenant, request, true); } return new SlimeJsonResponse(slime); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = userFrom(request) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId))); return new SlimeJsonResponse(slime); } 
private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athenzDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athenzDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String 
passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().tenant(new TenantId((tenantName))) .map(tenant -> tenant(tenant, request, true)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, request, listApplications); return new SlimeJsonResponse(slime); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); Slime slime = new Slime(); toSlime(slime.setObject(), application, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Application application, HttpRequest request) { if (application.deploying().isPresent()) { Cursor deployingObject = object.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if 
(((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values()); Cursor deploymentsArray = object.setArray("deploymentJobs"); for (JobStatus job : jobStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().jobName()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } object.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = object.setArray("globalRotations"); Set<URI> rotations = controller.getRotationUris(application.id()); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? 
Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); List<Deployment> deployments = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec().zones(), application.deployments().values()); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); if (request.getBooleanProperty("recursive")) toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request); else deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id()); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = 
controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" + deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." 
+ deploymentId.applicationId().instance(), request.getUri()).toString()); URI elkUrl = controller.getElkUri(deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region()); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); controller.applications().get(deploymentId.applicationId()).flatMap(application -> application.deploymentJobs().projectId()) .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); DeploymentCost appCost = deployment.calculateCost(); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? 
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." 
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, 
serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return tenant(updatedTenant, request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId", 
requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return tenant(tenant, request, true); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthenzDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, 
on a given version */
    private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) {
            Application application = controller.applications().require(id);
            // Refuse to start a new change while another change is still rolling out
            if (application.deploying().isPresent())
                throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " +
                                                   application.deploying().get() + " is in progress");

            // Version comes from the request body, or defaults to the current system version (see decideDeployVersion)
            Version version = decideDeployVersion(request);
            if ( ! systemHasVersion(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + controller.versionStatus().versions());
            controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version));
            return new MessageResponse("Triggered deployment of " + application + " on version " + version);
        }
    }

    /** Cancel any ongoing change for given application */
    private HttpResponse cancelDeploy(String tenantName, String applicationName) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) {
            Application application = controller.applications().require(id);
            Optional<Change> change = application.deploying();
            if (!change.isPresent()) {
                return new MessageResponse("No deployment in progress for " + application + " at this time");
            }
            controller.applications().deploymentTrigger().cancelChange(id);
            return new MessageResponse("Cancelled " + change.get() + " for " + application);
        }
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        // A single host may be restarted by passing the "hostname" request property; otherwise the whole deployment is restarted
        if (request.getProperty("hostname") != null)
            controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname")));
        else
            controller.applications().restart(deploymentId);
        return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                                 ApplicationResource.API_PATH, applicationName,
                                                                 EnvironmentResource.API_PATH, environment,
                                                                 "region", region,
                                                                 "instance", instanceName));
    }

    /**
     * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that
     * the application is working. It is called for all production zones, also those in which the application is not present,
     * and possibly before it is present, so failures are normal and expected.
     */
    private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) {
        try {
            DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                         new Zone(Environment.from(environment), RegionName.from(region)));
            return new JacksonJsonResponse(controller.grabLog(deploymentId));
        }
        catch (RuntimeException e) {
            // Failures are expected here (see javadoc above), so return an empty JSON object rather than an error
            Slime slime = new Slime();
            slime.setObject();
            return new SlimeJsonResponse(slime);
        }
    }

    private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));

        // The request is a multipart form: both "deployOptions" (JSON) and "applicationZip" parts are required
        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        if ( !
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } tenant.getPropertyId().ifPresent(propertyId -> { try { object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString()); object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString()); object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString()); Cursor lists = object.setArray("contacts"); for (List<? 
extends User> contactList : controller.organization().contactsFor(propertyId)) { Cursor list = lists.addArray(); for (User contact : contactList) list.addString(contact.displayName()); } } catch (RuntimeException e) { log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e); } }); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! 
deployment.zone().environment().equals(Environment.prod)) return; Cursor bcpStatusObject = object.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name()); } private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) { for (String endpoint : healthStatus.keySet()) { if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) { return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN); } } return RotationStatus.UNKNOWN; } private String toDns(String id) { return id.replace('_', '-'); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.version().toFullString()); jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision"))); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) { UserId userId = authorizer.getUserId(request); if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) { throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id())); } } private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest 
request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); if (request.getBooleanProperty("recursive")) toSlime(object, application, request); else object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor 
restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty();
        Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
        return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null),
                                                   gitRevisionFromSlime(object.field("gitRevision"))));
    }

    /** Builds a GitRevision from the optional repository, branch and commit fields of the given object. */
    private GitRevision gitRevisionFromSlime(Inspector object) {
        return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                               optional("branch", object).map(GitBranch::new).orElse(null),
                               optional("commit", object).map(GitCommit::new).orElse(null));
    }

    /** Reads the entire stream as a single string, or returns null if the stream is empty. */
    private String readToString(InputStream stream) {
        // try-with-resources: fixes a resource leak — the Scanner (and its underlying stream) was never closed
        try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
            if ( ! scanner.hasNext()) return null;
            return scanner.next();
        }
    }

    /** Returns whether the given version is currently active in this system. */
    private boolean systemHasVersion(Version version) {
        return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
    }

    /** Returns the version given in the request body, or the current system version if none is given. */
    private Version decideDeployVersion(HttpRequest request) {
        String requestVersion = readToString(request.getData());
        if (requestVersion != null)
            return new Version(requestVersion);
        else
            return controller.systemVersion();
    }

    /** Serializes the total cost, waste, utilization and per-cluster breakdown of a deployment. */
    public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
        object.setLong("tco", (long)deploymentCost.getTco());
        object.setLong("waste", (long)deploymentCost.getWaste());
        object.setDouble("utilization", deploymentCost.getUtilization());
        Cursor clustersObject = object.setObject("cluster");
        for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
            toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
    }

    /** Serializes the cost, flavor, utilization and host list of a single cluster. */
    private static void toSlime(ClusterCost clusterCost, Cursor object) {
        object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
        object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
        object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
        // Fix: cast to long, not int, consistent with toSlime(DeploymentCost, ...) above, to avoid overflow for large costs
        object.setLong("tco", (long)clusterCost.getTco());
        object.setLong("waste", (long)clusterCost.getWaste());
        object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
        object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
        object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
        object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
        object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
        object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
        Cursor utilObject = object.setObject("util");
        utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
        utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
        utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
        utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
        Cursor usageObject = object.setObject("usage");
        usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
        usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
        usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
        usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
        Cursor hostnamesArray = object.setArray("hostnames");
        for (String hostname : clusterCost.getClusterInfo().getHostnames())
            hostnamesArray.addString(hostname);
    }

    /** Returns the name of the most utilized resource ("cpu", "mem", "disk" or "diskbusy") in the given utilization. */
    private static String getResourceName(ClusterUtilization utilization) {
        String name = "cpu";
        double max = utilization.getMaxUtilization();
        if (utilization.getMemory() == max) {
            name = "mem";
        } else if (utilization.getDisk() == max) {
            name = "disk";
        } else if (utilization.getDiskBusy() == max) {
            name = "diskbusy";
        }
        return name;
    }

}
/** HTTP handler for the /application/v4 REST API: routes requests by method and path. */
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;
    private final AthenzClientFactory athenzClientFactory;

    @Inject
    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller,
                                 Authorizer authorizer, AthenzClientFactory athenzClientFactory) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
        this.athenzClientFactory = athenzClientFactory;
    }

    /** Generous timeout: some handled operations (e.g. deployment, convergence wait) are slow. */
    @Override
    public Duration getTimeout() { return Duration.ofMinutes(20); }

    /** Dispatches by HTTP method and translates domain exceptions into error responses. */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Unexpected: log with stack trace, but return only the message chain to the client
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests by path. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request); // NOTE(review): root() is defined outside this chunk — verify it exists
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests by path. */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests by path. */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests by path. */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported methods. */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants, with their applications included (recursive form of the root resource). */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request, true);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the authenticated (or overridden) user and the tenants it belongs to. */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response =
slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            // Only applications with a screwdriver project id are pipelines
            if ( ! application.deploymentJobs().projectId().isPresent()) continue;
            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        // Always present in the response, currently always empty
        response.setArray("brokenTenantPipelines");
        return new SlimeJsonResponse(slime);
    }

    /** Lists Athenz domains matching the given prefix. */
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known properties as (propertyid, property) pairs. */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Tells the client whether its bouncer cookie should be refreshed, based on the passthru header. */
    private HttpResponse cookieFreshness(HttpRequest request) {
        Slime slime = new Slime();
        String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD);
        slime.setObject().setBool("shouldRefreshCookie",
                                  ! SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader));
        return new SlimeJsonResponse(slime);
    }

    /** Returns one tenant (with applications), or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(new TenantId((tenantName)))
                         .map(tenant -> tenant(tenant, request, true))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes a tenant to a response. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    /** Lists all applications of a tenant. */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the 'default' instance of the given application, or 404. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        Application application = controller.applications().get(applicationId)
                .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
        Slime slime = new Slime();
        toSlime(slime.setObject(), application, request);
        return new SlimeJsonResponse(slime);
    }

    /** Serializes an application: ongoing change, job status, rotations, deployments and metrics. */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        if (application.deploying().isPresent()) {
            Cursor deployingObject = object.setObject("deploying");
            // An ongoing change is either a platform version change or an application revision change
            if (application.deploying().get() instanceof Change.VersionChange)
                deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString());
            else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent())
                toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision"));
        }
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .deploymentOrder()
                                              .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        object.setString("compileVersion", application.compileVersion(controller).toFullString());
        Cursor globalRotationsArray = object.setArray("globalRotations");
        Set<URI> rotations = controller.getRotationUris(application.id());
        // One health lookup (on the first rotation's host) covers all rotations of the application
        Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty()
                ? Collections.emptyMap()
                : controller.getHealthStatus(rotations.iterator().next().getHost());
        for (URI rotation : rotations)
            globalRotationsArray.addString(rotation.toString());
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                                                 .deploymentOrder()
                                                 .sortBy(application.deploymentSpec().zones(), application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            if ( ! rotations.isEmpty())
                setRotationStatus(deployment, rotationHealthStatus, deploymentObject);
            // recursive=true inlines each deployment; otherwise only its url is returned
            if (request.getBooleanProperty("recursive"))
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value() +
                                                           "/instance/" + application.id().instance().value(),
                                                           request.getUri()).toString());
        }
        try {
            MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id());
            Cursor metricsObject = object.setObject("metrics");
            metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality());
            metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality());
        } catch (RuntimeException e) {
            // Best effort: the rest of the application data is still useful without metrics.
            // NOTE(review): the message string is passed as a log parameter, not a Throwable — the stack trace is lost
            log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e));
        }
    }

    /** Returns a single deployment of an application instance, or 404. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application =
controller.applications().get(id)
                         .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(application.id(),
                                                     new Zone(Environment.from(environment),
                                                              RegionName.from(region)));
        Deployment deployment = application.deployments().get(deploymentId.zone());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zone());
        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Serializes one deployment: service urls, node/log/monitoring links, version info, cost and metrics. */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId);
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        if (deploymentEndpoints.isPresent()) {
            for (URI uri : deploymentEndpoints.get().getContainerEndpoints())
                serviceUrlArray.addString(uri.toString());
        }
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" + deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        URI elkUrl = controller.getElkUri(deploymentId);
        if (elkUrl != null)
            response.setString("elkUrl", elkUrl.toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.revision().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Zones with a deployment TTL also get an expiry timestamp
        Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region());
        deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli()));
        controller.applications().get(deploymentId.applicationId()).flatMap(application -> application.deploymentJobs().projectId())
                  .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.revision().source(), response);
        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    }

    /** Serializes a revision: its hash plus the source revision when known. */
    private void toSlime(ApplicationRevision revision, Cursor object) {
        object.setString("hash", revision.id());
        if (revision.source().isPresent())
            sourceRevisionToSlime(revision.source(), object.setObject("source"));
    }

    /** Adds gitRepository/gitBranch/gitCommit if the source revision is known; otherwise adds nothing. */
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId());
    }

    /** Takes a deployment's global rotations in or out of service, recording reason, agent and time. */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if (!existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = authorizer.getUserId(request).toString();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp);
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        try {
            List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus);
            return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? "in" : "out of"));
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage());
        }
    }

    /** Returns the current in/out-of-service status of each global rotation of a deployment. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        Slime slime = new Slime();
        Cursor c1 = slime.setObject().setArray("globalrotationoverride");
        try {
            Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId);
            for (String rotation : rotations.keySet()) {
                EndpointStatus currentStatus = rotations.get(rotation);
                // The array alternates rotation-name strings and their status objects
                c1.addString(rotation);
                Cursor c2 = c1.addObject();
                c2.setString("status", currentStatus.getStatus().name());
                c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
                c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
                c2.setLong("timestamp", currentStatus.getEpoch());
            }
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the health status of the global rotation endpoint matching the given zone, or 404 if the application has no rotations. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Set<URI> rotations = controller.getRotationUris(applicationId);
        if (rotations.isEmpty())
            throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'");
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost());
        for (String rotationEndpoint : rotationHealthStatus.keySet()) {
            // Match the endpoint belonging to this zone by its DNS-encoded environment and region
            if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) {
                Cursor bcpStatusObject = response.setObject("bcpStatus");
                bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /** Blocks until config convergence, up to the given timeout (default 1000). */
    private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                                                            new Zone(Environment.from(environment), RegionName.from(region))),
                                                                           asLong(request.getProperty("timeout"), 1000)));
    }

    /** Lists the services running in a deployment. */
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)),
                                                             new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)),
                                                             request.getUri());
        response.setResponse(applicationView);
        return response;
    }

    /** Proxies a request to one service's API in a deployment. */
    private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
        Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region,
serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return tenant(updatedTenant, request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId", 
requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return tenant(tenant, request, true); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthenzDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, 
on a given version */
    private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) {
            Application application = controller.applications().require(id);
            // Only one change may roll out at a time; the current one must finish or be cancelled first
            if (application.deploying().isPresent())
                throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " +
                                                   application.deploying().get() + " is in progress");

            Version version = decideDeployVersion(request);
            if ( ! systemHasVersion(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + controller.versionStatus().versions());

            controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version));
            return new MessageResponse("Triggered deployment of " + application + " on version " + version);
        }
    }

    /** Cancel any ongoing change for given application */
    private HttpResponse cancelDeploy(String tenantName, String applicationName) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) {
            Application application = controller.applications().require(id);
            Optional<Change> change = application.deploying();
            if (!change.isPresent()) {
                return new MessageResponse("No deployment in progress for " + application + " at this time");
            }
            controller.applications().deploymentTrigger().cancelChange(id);
            return new MessageResponse("Cancelled " + change.get() + " for " + application);
        }
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        // A 'hostname' request property restarts just that node; otherwise the whole deployment is restarted
        if (request.getProperty("hostname") != null)
            controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname")));
        else
            controller.applications().restart(deploymentId);
        return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                                 ApplicationResource.API_PATH, applicationName,
                                                                 EnvironmentResource.API_PATH, environment,
                                                                 "region", region,
                                                                 "instance", instanceName));
    }

    /**
     * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that
     * the application is working. It is called for all production zones, also those in which the application is not present,
     * and possibly before it is present, so failures are normal and expected.
     */
    private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) {
        try {
            DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                         new Zone(Environment.from(environment), RegionName.from(region)));
            return new JacksonJsonResponse(controller.grabLog(deploymentId));
        }
        catch (RuntimeException e) {
            // Failures are expected here (see javadoc above), so answer with an empty object rather than an error
            Slime slime = new Slime();
            slime.setObject();
            return new SlimeJsonResponse(slime);
        }
    }

    /** Deploys a multipart-uploaded application package to the given zone. */
    private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));

        // The request body is a multipart form with two mandatory parts
        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        if ( ! dataParts.containsKey("applicationZip"))
            return ErrorResponse.badRequest("Missing required form part 'applicationZip'");

        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        // Deploy authorization is separate from the tenant-level authorization used elsewhere in this handler
        DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory);
        Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
        Principal principal = authorizer.getPrincipal(request);
        deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId);

        DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")),
                                                                 optional("vespaVersion", deployOptions).map(Version::new),
                                                                 deployOptions.field("ignoreValidationErrors").asBool(),
                                                                 deployOptions.field("deployCurrentVersion").asBool());
        ActivateResult result = controller.applications().deployApplication(applicationId,
                                                                            zone,
                                                                            new ApplicationPackage(dataParts.get("applicationZip")),
                                                                            deployOptionsJsonClass);
        return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length));
    }

    /** Deletes a tenant; responds with the tenant as it was before deletion (without its application list). */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( ! tenant.isPresent())
            return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
        controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request));
        return tenant(tenant.get(), request, false);
    }

    /** Deletes the 'default' instance of the given application. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request));
        if (deleted == null)
            return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found");
        return new EmptyJsonResponse();
    }

    /** Deactivates the given deployment of an application. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));

        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            // We have no deployment recorded for this zone; deactivate by zone instead
            controller.applications().deactivate(application, zone);
        }
        else {
            controller.applications().deactivate(application, deployment, false);
        }
        return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                        ApplicationResource.API_PATH, applicationName,
                                                        EnvironmentResource.API_PATH, environment,
                                                        "region", region,
                                                        "instance", instanceName));
    }

    /**
     * Promote application Chef environments. To be used by component jobs only
     */
    private HttpResponse promoteApplication(String tenantName, String applicationName) {
        try{
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.systemChefEnvironment();
            String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /**
     * Promote application Chef environments for jobs that deploy applications
     */
    private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /** Returns the name of the authenticated user, if any. */
    private Optional<String> userFrom(HttpRequest request) {
        return authorizer.getPrincipalIfAny(request).map(Principal::getName);
    }

    /** Renders a tenant, optionally including its (default-instance) applications. */
    private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
        object.setString("tenant", tenant.getId().id());
        object.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> object.setString("property", p.id()));
        tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString()));
        tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id()));
        Cursor applicationArray = object.setArray("applications");
        if (listApplications) {
            for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) {
                if (application.id().instance().isDefault()) { // Only list the default instance
                    if (request.getBooleanProperty("recursive"))
                        toSlime(applicationArray.addObject(), application, request);
                    else
                        toSlime(application, applicationArray.addObject(), request);
                }
            }
        }
        // Property info comes from the external organization service; on failure it is logged and omitted
        tenant.getPropertyId().ifPresent(propertyId -> {
            try {
                object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString());
                object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString());
                object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString());
                Cursor lists = object.setArray("contacts");
                for (List<? extends User> contactList : controller.organization().contactsFor(propertyId)) {
                    Cursor list = lists.addArray();
                    for (User contact : contactList)
                        list.addString(contact.displayName());
                }
            }
            catch (RuntimeException e) {
                log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e);
            }
        });
    }

    /** Renders the compact tenant form used in tenant lists. */
    private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
        object.setString("tenant", tenant.getId().id());
        Cursor metaData = object.setObject("metaData");
        metaData.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id()));
        tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id()));
        object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString());
    }

    /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
    private URI withPath(String newPath, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
        }
        catch (URISyntaxException e) {
            throw new RuntimeException("Will not happen", e);
        }
    }

    /** Adds BCP rotation status to a production deployment object; no-op for other environments. */
    private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) {
        if ( ! deployment.zone().environment().equals(Environment.prod)) return;

        Cursor bcpStatusObject = object.setObject("bcpStatus");
        bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
    }

    /** Finds the status of the rotation endpoint whose name contains this deployment's environment and region. */
    private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
        for (String endpoint : healthStatus.keySet()) {
            // Endpoint names embed the DNS-ified environment and region, so match by substring
            if (endpoint.contains(toDns(deployment.zone().environment().value())) &&
                endpoint.contains(toDns(deployment.zone().region().value()))) {
                return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
            }
        }
        return RotationStatus.UNKNOWN;
    }

    /** Converts an identifier to its DNS form by replacing underscores with dashes. */
    private String toDns(String id) {
        return id.replace('_', '-');
    }

    /** Parses the given string as a long, using the given default when it is null. */
    private long asLong(String valueOrNull, long defaultWhenNull) {
        if (valueOrNull == null) return defaultWhenNull;
        try {
            return Long.parseLong(valueOrNull);
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
        }
    }

    /** Renders one job run: id, version, optional revision, reason and timestamp. */
    private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
        object.setLong("id", jobRun.id());
        object.setString("version", jobRun.version().toFullString());
        jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
        object.setString("reason", jobRun.reason());
        object.setLong("at", jobRun.at().toEpochMilli());
    }

    /** Reads at most 1 MB of JSON from the given stream into a Slime tree. */
    private Slime toSlime(InputStream jsonStream) {
        try {
            byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
            return SlimeUtils.jsonToSlime(jsonBytes);
        } catch (IOException e) {
            throw new RuntimeException(e); // chain the cause (was previously thrown away)
        }
    }

    private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
        UserId userId = authorizer.getUserId(request);
        if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) {
            throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'",
                                                       userId.id(), userGroup.id()));
        }
    }

    private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) {
        UserId userId = authorizer.getUserId(request);
        if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) {
            throw new ForbiddenException(
                    String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id()));
        }
    }

    /** Returns the given field, or throws IllegalArgumentException when it is missing. */
    private Inspector mandatory(String key, Inspector object) {
        if ( ! object.field(key).valid())
            throw new IllegalArgumentException("'" + key + "' is missing");
        return object.field(key);
    }

    /** Returns the given string field, if present. */
    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }

    /** Joins the given path elements with '/'. */
    private static String path(Object... elements) {
        return Joiner.on("/").join(elements);
    }

    /** Renders the short application form (name, instance and url) used in application lists. */
    private void toSlime(Application application, Cursor object, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
                                         "/application/" + application.id().application().value(),
                                         request.getUri()).toString());
    }

    /** Renders the result of a deploy: revision, prepare log and required config change actions. */
    private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) {
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        object.setString("revisionId", result.getRevisionId().id());
        object.setLong("applicationZipSize", applicationZipSizeBytes);
        Cursor logArray = object.setArray("prepareMessages");
        if (result.getPrepareResponse().log != null) {
            for (Log logMessage : result.getPrepareResponse().log) {
                Cursor logObject = logArray.addObject();
                logObject.setLong("time", logMessage.time);
                logObject.setString("level", logMessage.level);
                logObject.setString("message", logMessage.message);
            }
        }

        Cursor changeObject = object.setObject("configChangeActions");

        // Services which must be restarted for the config change to take effect
        Cursor restartActionsArray = changeObject.setArray("restart");
        for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) {
            Cursor restartActionObject = restartActionsArray.addObject();
            restartActionObject.setString("clusterName", restartAction.clusterName);
            restartActionObject.setString("clusterType", restartAction.clusterType);
            restartActionObject.setString("serviceType", restartAction.serviceType);
            serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
            stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
        }

        // Document types which must be re-fed for the config change to take effect
        Cursor refeedActionsArray = changeObject.setArray("refeed");
        for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) {
            Cursor refeedActionObject = refeedActionsArray.addObject();
            refeedActionObject.setString("name", refeedAction.name);
            refeedActionObject.setBool("allowed", refeedAction.allowed);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }

    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
        for (ServiceInfo serviceInfo : serviceInfoList) {
            Cursor serviceInfoObject = array.addObject();
            serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
            serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
            serviceInfoObject.setString("configId", serviceInfo.configId);
            serviceInfoObject.setString("hostName", serviceInfo.hostName);
        }
    }

    private void stringsToSlime(List<String> strings, Cursor array) {
        for (String string : strings)
            array.addString(string);
    }

    /** Parses an optional screwdriver build job (id plus git revision) from the given slime object. */
    private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) {
        if ( ! object.valid() ) return Optional.empty();
        Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
        return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null),
                                                   gitRevisionFromSlime(object.field("gitRevision"))));
    }

    private GitRevision gitRevisionFromSlime(Inspector object) {
        return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                               optional("branch", object).map(GitBranch::new).orElse(null),
                               optional("commit", object).map(GitCommit::new).orElse(null));
    }

    /** Reads the entire stream as one string, or returns null if the stream is empty. */
    private String readToString(InputStream stream) {
        // NOTE(review): Scanner uses the platform default charset here — confirm request bodies are always ASCII/UTF-8
        Scanner scanner = new Scanner(stream).useDelimiter("\\A");
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }

    /** Returns whether the given version is among the versions currently active in this system. */
    private boolean systemHasVersion(Version version) {
        return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
    }

    /** Returns the version given in the request body, or the current system version when the body is empty. */
    private Version decideDeployVersion(HttpRequest request) {
        String requestVersion = readToString(request.getData());
        if (requestVersion != null)
            return new Version(requestVersion);
        else
            return controller.systemVersion();
    }

    /** Renders the cost of a deployment, with a per-cluster breakdown. */
    public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
        object.setLong("tco", (long)deploymentCost.getTco());
        object.setLong("waste", (long)deploymentCost.getWaste());
        object.setDouble("utilization", deploymentCost.getUtilization());
        Cursor clustersObject = object.setObject("cluster");
        for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
            toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
    }

    /** Renders the cost of a single cluster: flavor, utilization, system usage and hostnames. */
    private static void toSlime(ClusterCost clusterCost, Cursor object) {
        object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
        object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
        object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
        object.setLong("tco", (int)clusterCost.getTco());
        object.setLong("waste", (int)clusterCost.getWaste());
        object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
        object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
        object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
        object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
        object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
        object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
        Cursor utilObject = object.setObject("util");
        utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
        utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
        utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
        utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
        Cursor usageObject = object.setObject("usage");
        usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
        usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
        usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
        usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
        Cursor hostnamesArray = object.setArray("hostnames");
        for (String hostname : clusterCost.getClusterInfo().getHostnames())
            hostnamesArray.addString(hostname);
    }

    /** Returns the name of the most utilized resource in the given cluster. */
    private static String getResourceName(ClusterUtilization utilization) {
        String name = "cpu";
        double max = utilization.getMaxUtilization();
        if (utilization.getMemory() == max) {
            name = "mem";
        } else if (utilization.getDisk() == max) {
            name = "disk";
        } else if (utilization.getDiskBusy() == max) {
            name = "diskbusy";
        }
        return name;
    }

}
Change the rendering behaviour when the request parameter `recursive=true` is set.
/**
 * Renders an application in full detail: the change currently being deployed, job status in
 * deployment order, compile version, global rotations, per-zone deployments ("instances"),
 * and application metrics.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    // The change being rolled out, if any: either a platform version or an application revision
    if (application.deploying().isPresent()) {
        Cursor deployingObject = object.setObject("deploying");
        if (application.deploying().get() instanceof Change.VersionChange)
            deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString());
        else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent())
            toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision"));
    }

    // Deployment jobs, sorted by deployment order
    List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
            .deploymentOrder()
            .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values());
    Cursor deploymentsArray = object.setArray("deploymentJobs");
    for (JobStatus job : jobStatus) {
        Cursor jobObject = deploymentsArray.addObject();
        jobObject.setString("type", job.type().jobName());
        jobObject.setBool("success", job.isSuccess());
        job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
        job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
        job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
        job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
    }

    object.setString("compileVersion", application.compileVersion(controller).toFullString());

    // Global rotations; health is only fetched when at least one rotation exists, using the first rotation's host
    Cursor globalRotationsArray = object.setArray("globalRotations");
    Set<URI> rotations = controller.getRotationUris(application.id());
    Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty()
            ? Collections.emptyMap()
            : controller.getHealthStatus(rotations.iterator().next().getHost());
    for (URI rotation : rotations)
        globalRotationsArray.addString(rotation.toString());

    // Deployments, in deployment order; inlined when 'recursive' is set, otherwise linked by url
    List<Deployment> deployments = controller.applications().deploymentTrigger()
            .deploymentOrder()
            .sortBy(application.deploymentSpec().zones(), application.deployments().values());
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        deploymentObject.setString("environment", deployment.zone().environment().value());
        deploymentObject.setString("region", deployment.zone().region().value());
        deploymentObject.setString("instance", application.id().instance().value());
        if ( ! rotations.isEmpty())
            setRotationStatus(deployment, rotationHealthStatus, deploymentObject);
        if (request.getBooleanProperty("recursive"))
            toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
        else
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value() +
                                                       "/instance/" + application.id().instance().value(),
                                                       request.getUri()).toString());
    }

    // Metrics are best-effort: on failure the field is omitted and a warning is logged
    try {
        MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id());
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality());
    } catch (RuntimeException e) {
        // NOTE(review): java.util.logging treats the third argument as a message parameter, and this message
        // has no {0} placeholder, so the exception text is not included in the log record — confirm intent
        log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e));
    }
}
else
/**
 * Renders an application in full detail, including its identity (application and instance names),
 * the change currently being deployed, job status in deployment order, compile version,
 * global rotations, per-zone deployments ("instances"), and application metrics.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    // The change being rolled out, if any: either a platform version or an application revision
    if (application.deploying().isPresent()) {
        Cursor deployingObject = object.setObject("deploying");
        if (application.deploying().get() instanceof Change.VersionChange)
            deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString());
        else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent())
            toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision"));
    }

    // Deployment jobs, sorted by deployment order
    List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
            .deploymentOrder()
            .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values());
    Cursor deploymentsArray = object.setArray("deploymentJobs");
    for (JobStatus job : jobStatus) {
        Cursor jobObject = deploymentsArray.addObject();
        jobObject.setString("type", job.type().jobName());
        jobObject.setBool("success", job.isSuccess());
        job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
        job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
        job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
        job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
    }

    object.setString("compileVersion", application.compileVersion(controller).toFullString());

    // Global rotations; health is only fetched when at least one rotation exists, using the first rotation's host
    Cursor globalRotationsArray = object.setArray("globalRotations");
    Set<URI> rotations = controller.getRotationUris(application.id());
    Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty()
            ? Collections.emptyMap()
            : controller.getHealthStatus(rotations.iterator().next().getHost());
    for (URI rotation : rotations)
        globalRotationsArray.addString(rotation.toString());

    // Deployments, in deployment order; inlined when 'recursive' is set, otherwise linked by url
    List<Deployment> deployments = controller.applications().deploymentTrigger()
            .deploymentOrder()
            .sortBy(application.deploymentSpec().zones(), application.deployments().values());
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        deploymentObject.setString("environment", deployment.zone().environment().value());
        deploymentObject.setString("region", deployment.zone().region().value());
        deploymentObject.setString("instance", application.id().instance().value());
        if ( ! rotations.isEmpty())
            setRotationStatus(deployment, rotationHealthStatus, deploymentObject);
        if (request.getBooleanProperty("recursive"))
            toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
        else
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value() +
                                                       "/instance/" + application.id().instance().value(),
                                                       request.getUri()).toString());
    }

    // Metrics are best-effort: on failure the field is omitted and a warning is logged
    try {
        MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id());
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality());
    } catch (RuntimeException e) {
        // NOTE(review): java.util.logging treats the third argument as a message parameter, and this message
        // has no {0} placeholder, so the exception text is not included in the log record — confirm intent
        log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e));
    }
}
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final Authorizer authorizer; private final AthenzClientFactory athenzClientFactory; @Inject public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer, AthenzClientFactory athenzClientFactory) { super(executor, accessLog); this.controller = controller; this.authorizer = authorizer; this.athenzClientFactory = athenzClientFactory; } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return 
tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return 
rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) { Cursor tenantObject = tenantArray.addObject(); tenantObject.setString("tenant", tenant.getId().id()); toSlime(tenantObject, tenant, request, true); } return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return request.getBooleanProperty("recursive") ? 
recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = userFrom(request) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! 
application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athenzDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athenzDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! 
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().tenant(new TenantId((tenantName))) .map(tenant -> tenant(tenant, request, true)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, request, listApplications); return new SlimeJsonResponse(slime); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); Slime slime = new Slime(); toSlime(slime.setObject(), application, request); return new SlimeJsonResponse(slime); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == 
null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" + deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); URI elkUrl = controller.getElkUri(deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region()); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); controller.applications().get(deploymentId.applicationId()).flatMap(application -> application.deploymentJobs().projectId()) .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); DeploymentCost appCost = deployment.calculateCost(); 
Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); 
EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? "in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? 
"" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new 
ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return tenant(updatedTenant, request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId", 
requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return tenant(tenant, request, true); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthenzDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, 
on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), 
new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
    controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request));

    // TODO: Change to a message response saying the tenant was deleted
    return tenant(tenant.get(), request, false);
}

// Deletes the default instance of the given application. Returns 404 if the application does not exist.
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    authorizer.throwIfUnauthorized(new TenantId(tenantName), request);

    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request));
    if (deleted == null)
        return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found");

    return new EmptyJsonResponse(); // NOTE: The application exists under other instance names
}

// Deactivates the deployment of this application in the given zone, whether or not a Deployment record is present.
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));

    Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        // Attempt to deactivate a deployment which is no longer recorded (e.g. already expired), best effort.
        controller.applications().deactivate(application, zone);
    }
    else {
        controller.applications().deactivate(application, deployment, false);
    }
    return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                    ApplicationResource.API_PATH, applicationName,
                                                    EnvironmentResource.API_PATH, environment,
                                                    "region", region,
                                                    "instance", instanceName));
}

/** Promote application Chef environments. To be used by component jobs only. */
private HttpResponse promoteApplication(String tenantName, String applicationName) {
    try{
        // Copy the system-level source environment to this application's source environment.
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.systemChefEnvironment();
        String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}

/** Promote application Chef environments for jobs that deploy applications. */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) {
    try {
        // Copy the application source environment to the per-zone target environment.
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}

// The authenticated user's name, if any principal is present on the request.
private Optional<String> userFrom(HttpRequest request) {
    return authorizer.getPrincipalIfAny(request).map(Principal::getName);
}

// Serializes a tenant, optionally including its default-instance applications, plus property/contact info when available.
private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
    object.setString("type", tenant.tenantType().name());
    tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id()));
    tenant.getProperty().ifPresent(p -> object.setString("property", p.id()));
    tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString()));
    tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id()));
    Cursor applicationArray = object.setArray("applications");
    if (listApplications) { // This cludge is needed because we still use tenant id, and we cannot guess whether the
                            // default application instance is the one to list here — only default instances are shown.
        for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) {
            if (application.id().instance().isDefault()) // TODO: Skip non-default applications entirely
                toSlime(application, applicationArray.addObject(), request);
        }
    }
    tenant.getPropertyId().ifPresent(propertyId -> {
        try {
            object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString());
            object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString());
            object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString());
            Cursor lists = object.setArray("contacts");
            for (List<? extends User> contactList : controller.organization().contactsFor(propertyId)) {
                Cursor list = lists.addArray();
                for (User contact : contactList)
                    list.addString(contact.displayName());
            }
        }
        catch (RuntimeException e) {
            // Contact info is best effort; failures here must not break the tenant response.
            log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e);
        }
    });
}

// Compact tenant representation used in tenant lists: metadata plus a URL to the full resource.
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.getId().id());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenant.tenantType().name());
    tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id()));
    tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id()));
    tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id()));
    object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString());
}

/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}

// Adds BCP rotation status for a deployment; only production deployments participate in global rotations.
private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) {
    if ( !
deployment.zone().environment().equals(Environment.prod)) return;

    Cursor bcpStatusObject = object.setObject("bcpStatus");
    bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
}

// Finds the status of the rotation endpoint whose name contains this deployment's environment and region
// (in DNS form), or UNKNOWN if no endpoint matches.
private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
    for (String endpoint : healthStatus.keySet()) {
        if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) {
            return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
        }
    }
    return RotationStatus.UNKNOWN;
}

// Converts an id to the form used in DNS names: underscores become dashes.
private String toDns(String id) {
    return id.replace('_', '-');
}

// Parses the given string as a long, returning the default when the string is null.
// Throws IllegalArgumentException on a non-null, non-numeric value.
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}

// Serializes one job run: id, platform version, optional application revision, reason and timestamp.
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.version().toFullString());
    jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}

// Reads the request body (capped at 1 MB) and parses it as JSON into a Slime structure.
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    }
    catch (IOException e) {
        // Fix: chain the cause instead of throwing a bare RuntimeException, which discarded all
        // diagnostic information about why reading the request body failed.
        throw new RuntimeException(e);
    }
}

// Throws ForbiddenException unless the requester is a super user or a member of the given OpsDB group.
private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
    UserId userId = authorizer.getUserId(request);
    if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) {
        throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'",
                                                   userId.id(), userGroup.id()));
    }
}

// Throws ForbiddenException unless the requester is an admin of the given Athenz domain.
private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest
request) {
    UserId userId = authorizer.getUserId(request);
    if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) {
        throw new ForbiddenException(
                String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id()));
    }
}

// Returns the named field, throwing IllegalArgumentException if it is missing.
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

// Returns the named field as a string, or empty if the field is absent.
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

// Joins path elements with '/'.
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}

// Serializes an application reference: inline content when "recursive" is requested, otherwise just a URL.
private void toSlime(Application application, Cursor object, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    if (request.getBooleanProperty("recursive"))
        toSlime(object, application, request);
    else
        object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
                                         "/application/" + application.id().application().value(),
                                         request.getUri()).toString());
}

// Serializes the result of a deploy: revision id, package size, prepare log, and the config change
// actions (restart and refeed) the prepare step reported.
private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.getRevisionId().id());
    object.setLong("applicationZipSize", applicationZipSizeBytes);
    Cursor logArray = object.setArray("prepareMessages");
    if (result.getPrepareResponse().log != null) {
        for (Log logMessage : result.getPrepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");

    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }

    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}

// Serializes a list of service infos into the given array.
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo serviceInfo : serviceInfoList) {
        Cursor serviceInfoObject = array.addObject();
        serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
        serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
        serviceInfoObject.setString("configId", serviceInfo.configId);
        serviceInfoObject.setString("hostName", serviceInfo.hostName);
    }
}

// Adds each string to the given array.
private void stringsToSlime(List<String> strings, Cursor array) {
    for (String string : strings)
        array.addString(string);
}

// Deserializes an optional Screwdriver build job reference from deploy options.
private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) {
    if ( ! object.valid() ) return Optional.empty();
    Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
    return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null),
                                               gitRevisionFromSlime(object.field("gitRevision"))));
}

// Deserializes an (all-optional) git revision triple from deploy options.
private GitRevision gitRevisionFromSlime(Inspector object) {
    return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                           optional("branch", object).map(GitBranch::new).orElse(null),
                           optional("commit", object).map(GitCommit::new).orElse(null));
}

// Reads the whole stream as one string, or null when the stream is empty.
// NOTE(review): the Scanner uses the platform default charset and is never closed — presumably
// the input is ASCII version strings so this has been harmless; confirm before relying on it for other data.
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream).useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}

// True if the given version is known to the system's version status.
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}

// Uses the version given in the request body, falling back to the current system version when none is given.
private Version decideDeployVersion(HttpRequest request) {
    String requestVersion = readToString(request.getData());
    if (requestVersion != null)
        return new Version(requestVersion);
    else
        return controller.systemVersion();
}

// Serializes deployment cost: totals plus a per-cluster breakdown.
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long)deploymentCost.getTco());
    object.setLong("waste", (long)deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clustersObject = object.setObject("cluster");
    for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
        toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}

// Serializes one cluster's cost figures, flavor info, utilization/usage breakdown and host names.
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    object.setLong("tco", (int)clusterCost.getTco());
    object.setLong("waste", (int)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}

// Names the resource dimension with the highest utilization in this cluster.
private static String getResourceName(ClusterUtilization utilization) {
    String name = "cpu";
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) {
        name = "mem";
    } else if (utilization.getDisk() == max) {
        name = "disk";
    } else if (utilization.getDiskBusy() == max) {
        name = "diskbusy";
    }
    return name;
}

}
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;
    private final AthenzClientFactory athenzClientFactory;

    @Inject
    public ApplicationApiHandler(Executor executor, AccessLog accessLog,
                                 Controller controller, Authorizer authorizer,
                                 AthenzClientFactory athenzClientFactory) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
        this.athenzClientFactory = athenzClientFactory;
    }

    @Override
    public Duration getTimeout() {
        // Deploys can take a long time; allow generous time for them to complete.
        return Duration.ofMinutes(20);
    }

    // Dispatches on HTTP method and maps the exceptions thrown by handlers to the appropriate HTTP error responses.
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Unexpected errors are logged with the full stack trace before becoming a 500.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    // Routes GET requests by URL path pattern.
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return
tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // Routes PUT requests by URL path pattern.
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // Routes POST requests by URL path pattern. Note that both the instance path and its /deploy suffix map to deploy().
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // Routes DELETE requests by URL path pattern.
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // CORS-style capability response: lists the methods this handler accepts.
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    // Serializes every tenant, with applications inlined.
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request, true);
        return new SlimeJsonResponse(slime);
    }

    // Top-level resource listing, or the full recursive dump when "recursive" is requested.
    private HttpResponse root(HttpRequest request) {
        return request.getBooleanProperty("recursive") ?
recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    // Returns the authenticated (or overridden) user and the tenants it belongs to.
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);

        List<Tenant> tenants = controller.tenants().asList(userId);

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        // Whether one of the listed tenants is this user's own (personal) tenant.
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    // Lists all tenants in compact form.
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            if ( ! application.deploymentJobs().projectId().isPresent()) continue;

            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        // Always present for API compatibility; currently no pipelines are reported as broken here.
        response.setArray("brokenTenantPipelines");
        return new SlimeJsonResponse(slime);
    }

    // Lists Athenz domains, optionally filtered by a name prefix.
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.id());
        }
        return new SlimeJsonResponse(slime);
    }

    // Lists all known properties with their ids.
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    // Tells the client whether its auth cookie should be refreshed, based on the bouncer passthrough header.
    private HttpResponse cookieFreshness(HttpRequest request) {
        Slime slime = new Slime();
        String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD);
        slime.setObject().setBool("shouldRefreshCookie",
                                  ! SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader));
        return new SlimeJsonResponse(slime);
    }

    // Returns one tenant with its applications listed, or 404 if it does not exist.
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(new TenantId((tenantName)))
                .map(tenant -> tenant(tenant, request, true))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    // Serializes one tenant.
    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    // Lists all applications of a tenant.
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    // Returns the default instance of the given application, or 404.
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        Application application =
                controller.applications().get(applicationId)
                          .orElseThrow(() -> new NotExistsException(applicationId + " not found"));

        Slime slime = new Slime();
        toSlime(slime.setObject(), application, request);
        return new SlimeJsonResponse(slime);
    }

    // Returns one deployment of an application instance, or 404 when the application or deployment is absent.
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application =
                controller.applications().get(id)
                          .orElseThrow(() -> new NotExistsException(id + " not found"));

        DeploymentId deploymentId = new DeploymentId(application.id(),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));

        Deployment deployment = application.deployments().get(deploymentId.zone());
        if (deployment ==
null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zone());

        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    // Serializes one deployment: service URLs, node listing URL, log/monitoring URLs, version/revision info,
    // expiry, source revision, cost and metrics.
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId);
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        if (deploymentEndpoints.isPresent()) {
            for (URI uri : deploymentEndpoints.get().getContainerEndpoints())
                serviceUrlArray.addString(uri.toString());
        }

        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" + deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());

        URI elkUrl = controller.getElkUri(deploymentId);
        if (elkUrl != null)
            response.setString("elkUrl", elkUrl.toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.revision().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Deployments in some zones expire; when a TTL applies, report the expiry time.
        Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region());
        deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli()));
        controller.applications().get(deploymentId.applicationId()).flatMap(application -> application.deploymentJobs().projectId())
                  .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.revision().source(), response);

        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    }

    // Serializes an application revision: its hash plus the source revision when known.
    private void toSlime(ApplicationRevision revision, Cursor object) {
        object.setString("hash", revision.id());
        if (revision.source().isPresent())
            sourceRevisionToSlime(revision.source(), object.setObject("source"));
    }

    // Adds git repository/branch/commit fields when a source revision is present; otherwise adds nothing.
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    // The monitoring dashboard URI for this deployment.
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(),
                                                                deploymentId.zone().region(),
                                                                deploymentId.applicationId());
    }

    // Takes this deployment's rotations in or out of service. Requires a JSON body with a mandatory "reason";
    // the acting user and current time are recorded with the override.
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {

        // Check if request is authorized
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if (!existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");

        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);

        // Decode payload (reason) and construct parameter to the endpoint status class (service, reason, agent, timestamp)
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = authorizer.getUserId(request).toString();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp);

        // DeploymentId identifies the zone and application we are dealing with
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        try {
            List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus);
            return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? "in" : "out of"));
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage());
        }
    }

    // Reads the current rotation override status for each of this deployment's rotations.
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {

        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));

        Slime slime = new Slime();
        Cursor c1 = slime.setObject().setArray("globalrotationoverride");
        try {
            Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId);
            for (String rotation : rotations.keySet()) {
                EndpointStatus currentStatus = rotations.get(rotation);
                c1.addString(rotation);
                Cursor c2 = c1.addObject();
                c2.setString("status", currentStatus.getStatus().name());
                c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
                c2.setString("agent", currentStatus.getAgent() == null ?
"" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new 
ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return tenant(updatedTenant, request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId", 
requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return tenant(tenant, request, true); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthenzDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, 
on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), 
new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.getId().id()); object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) { if (request.getBooleanProperty("recursive")) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } tenant.getPropertyId().ifPresent(propertyId -> { try { object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString()); object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString()); object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString()); Cursor lists = object.setArray("contacts"); for (List<? 
extends User> contactList : controller.organization().contactsFor(propertyId)) { Cursor list = lists.addArray(); for (User contact : contactList) list.addString(contact.displayName()); } } catch (RuntimeException e) { log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e); } }); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! 
deployment.zone().environment().equals(Environment.prod)) return; // rotation status is only meaningful for prod deployments
        Cursor bcpStatusObject = object.setObject("bcpStatus");
        bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
    }

    /** Returns the health status of the rotation endpoint matching this deployment's environment and region, or UNKNOWN if none matches. */
    private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
        for (String endpoint : healthStatus.keySet()) {
            // Endpoint names embed environment and region in DNS form ('_' mapped to '-')
            if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) {
                return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
            }
        }
        return RotationStatus.UNKNOWN;
    }

    /** Maps an identifier to its DNS form by replacing underscores with dashes. */
    private String toDns(String id) {
        return id.replace('_', '-');
    }

    /**
     * Parses the given string as a long, returning the given default when the string is null.
     *
     * @throws IllegalArgumentException if the value is non-null but not parseable as a long
     */
    private long asLong(String valueOrNull, long defaultWhenNull) {
        if (valueOrNull == null) return defaultWhenNull;
        try {
            return Long.parseLong(valueOrNull);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
        }
    }

    /** Serializes a job run (id, version, optional revision, reason, timestamp) into the given cursor. */
    private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
        object.setLong("id", jobRun.id());
        object.setString("version", jobRun.version().toFullString());
        jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
        object.setString("reason", jobRun.reason());
        object.setLong("at", jobRun.at().toEpochMilli());
    }

    /** Reads at most 1 MB from the given stream and parses it as JSON. */
    private Slime toSlime(InputStream jsonStream) {
        try {
            byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
            return SlimeUtils.jsonToSlime(jsonBytes);
        } catch (IOException e) {
            // Fix: previously threw a bare new RuntimeException(), discarding the cause and message entirely
            throw new RuntimeException(e);
        }
    }

    /** @throws ForbiddenException unless the request issuer is a super user or a member of the given OpsDB user group */
    private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
        UserId userId = authorizer.getUserId(request);
        if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup)) {
            throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id()));
        }
    }

    private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest
request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); 
restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty(); Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new); return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision")))); } private GitRevision gitRevisionFromSlime(Inspector object) { return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null), optional("branch", object).map(GitBranch::new).orElse(null), optional("commit", object).map(GitCommit::new).orElse(null)); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } private Version decideDeployVersion(HttpRequest request) { String requestVersion = readToString(request.getData()); if (requestVersion != null) return new Version(requestVersion); else return controller.systemVersion(); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", 
(int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } }
Changed behaviour when `recursive=true`: the application is now serialized in full (recursively) instead of only emitting its URL.
/**
 * Writes a summary of the given application into the given Slime object.
 * When the request asks for {@code recursive=true} the full application detail is
 * serialized in place; otherwise only a URL reference to the application resource is written.
 */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    if ( ! request.getBooleanProperty("recursive")) {
        // Shallow form: link to the application resource instead of inlining it.
        String resourcePath = "/application/v4/tenant/" + application.id().tenant().value() +
                              "/application/" + application.id().application().value();
        object.setString("url", withPath(resourcePath, request.getUri()).toString());
        return;
    }
    toSlime(object, application, request);
}
Previous behaviour (always wrote only the URL reference):
/**
 * Writes a shallow summary of the given application into the given Slime object:
 * the application and instance names plus a URL reference to the full application resource.
 */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    String tenantValue = application.id().tenant().value();
    String applicationValue = application.id().application().value();
    object.setString("application", applicationValue);
    object.setString("instance", application.id().instance().value());
    String resourcePath = "/application/v4/tenant/" + tenantValue +
                          "/application/" + applicationValue;
    object.setString("url", withPath(resourcePath, request.getUri()).toString());
}
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final Authorizer authorizer; private final AthenzClientFactory athenzClientFactory; @Inject public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer, AthenzClientFactory athenzClientFactory) { super(executor, accessLog); this.controller = controller; this.authorizer = authorizer; this.athenzClientFactory = athenzClientFactory; } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return 
tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return 
rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) { Cursor tenantObject = tenantArray.addObject(); tenantObject.setString("tenant", tenant.getId().id()); toSlime(tenantObject, tenant, request, true); } return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return request.getBooleanProperty("recursive") ? 
recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = userFrom(request) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! 
application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athenzDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athenzDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! 
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().tenant(new TenantId((tenantName))) .map(tenant -> tenant(tenant, request, true)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, request, listApplications); return new SlimeJsonResponse(slime); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); Slime slime = new Slime(); toSlime(slime.setObject(), application, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Application application, HttpRequest request) { if (application.deploying().isPresent()) { Cursor deployingObject = object.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } 
List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values()); Cursor deploymentsArray = object.setArray("deploymentJobs"); for (JobStatus job : jobStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().jobName()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } object.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = object.setArray("globalRotations"); Set<URI> rotations = controller.getRotationUris(application.id()); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); List<Deployment> deployments = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec().zones(), application.deployments().values()); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! 
rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); if (request.getBooleanProperty("recursive")) toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request); else deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id()); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); 
Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" + deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); URI elkUrl = controller.getElkUri(deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region()); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); controller.applications().get(deploymentId.applicationId()).flatMap(application -> application.deploymentJobs().projectId()) .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); DeploymentCost appCost = deployment.calculateCost(); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", 
metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? 
EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? "in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? 
"" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new 
ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return tenant(updatedTenant, request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId", 
// --- tail of createTenant(...) (signature and earlier statements are above this chunk) ---
// Tenant-type-specific authorization checks, then the new tenant is stored and echoed back.
requestData).map(PropertyId::new));
    if (tenant.isOpsDbTenant())
        throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request);
    if (tenant.isAthensTenant())
        throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request);
    controller.tenants().addTenant(tenant, authorizer.getNToken(request));
    return tenant(tenant, request, true);
}

/** Migrates an existing tenant to an Athenz domain. The caller must be authorized for the tenant
 *  AND be an admin of the target Athenz domain, and must supply an NToken (used downstream). */
private HttpResponse migrateTenant(String tenantName, HttpRequest request) {
    TenantId tenantid = new TenantId(tenantName);
    Inspector requestData = toSlime(request.getData()).get();
    AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString());
    Property property = new Property(mandatory("property", requestData).asString());
    PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString());
    authorizer.throwIfUnauthorized(tenantid, request);
    throwIfNotAthenzDomainAdmin(tenantDomain, request);
    NToken nToken = authorizer.getNToken(request)
            .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens"));
    Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken);
    return tenant(tenant, request, true);
}

/** Creates the 'default'-instance application under the given tenant.
 *  A FORBIDDEN code from ZMS is translated to a 403; any other ZmsException propagates unchanged. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
    Application application;
    try {
        application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request));
    }
    catch (ZmsException e) {
        if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw new ForbiddenException("Not authorized to create application", e);
        else
            throw e;
    }
    Slime slime = new Slime();
    toSlime(application, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Trigger deployment of the last built application package, on a given version */
private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try (Lock lock = controller.applications().lock(id)) {
        Application application = controller.applications().require(id);
        // Refuse to start a new change while another is in flight
        if (application.deploying().isPresent())
            throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress");
        Version version = decideDeployVersion(request);
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions());
        controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version));
        return new MessageResponse("Triggered deployment of " + application + " on version " + version);
    }
}

/** Cancel any ongoing change for given application */
private HttpResponse cancelDeploy(String tenantName, String applicationName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try (Lock lock = controller.applications().lock(id)) {
        Application application = controller.applications().require(id);
        Optional<Change> change = application.deploying();
        if (!change.isPresent()) {
            return new MessageResponse("No deployment in progress for " + application + " at this time");
        }
        controller.applications().deploymentTrigger().cancelChange(id);
        return new MessageResponse("Cancelled " + change.get() + " for " + application);
    }
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region)));
    // Optional 'hostname' query property narrows the restart to a single host
    if (request.getProperty("hostname") != null)
        controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname")));
    else
        controller.applications().restart(deploymentId);
    return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName));
}

/**
 * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that
 * the application is working. It is called for all production zones, also those in which the application is not present,
 * and possibly before it is present, so failures are normal and expected.
 */
private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) {
    try {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region)));
        return new JacksonJsonResponse(controller.grabLog(deploymentId));
    }
    catch (RuntimeException e) {
        // Best effort by design (see javadoc above): any failure yields an empty JSON object
        Slime slime = new Slime();
        slime.setObject();
        return new SlimeJsonResponse(slime);
    }
}

// Deploys an application package to one zone: validates the multipart form parts, checks deploy
// authorization for the principal, then activates the package.
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    if ( ! dataParts.containsKey("applicationZip"))
        return ErrorResponse.badRequest("Missing required form part 'applicationZip'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory);
    Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
    Principal principal = authorizer.getPrincipal(request);
    deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId);
    DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool());
    ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length));
}

// Deletes a tenant; 404 when it does not exist. Requires tenant authorization.
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName));
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
    controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request));
    return tenant(tenant.get(), request, false); // echo the deleted tenant, without its applications
}

// Deletes the 'default'-instance application; 404 when it did not exist.
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request));
    if (deleted == null)
        return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found");
    return new EmptyJsonResponse();
}

// Deactivates a deployment in the given zone. When we have no local record of the deployment we
// still request deactivation for the zone (the 'deployment == null' branch).
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        controller.applications().deactivate(application, zone);
    }
    else {
        controller.applications().deactivate(application, deployment, false);
    }
    return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName));
}

/** * Promote application Chef environments.
To be used by component jobs only */
private HttpResponse promoteApplication(String tenantName, String applicationName) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.systemChefEnvironment();
        String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        // Deliberately broad: promotion failures must not fail the calling job with a 5xx stack trace
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}

/** Promote application Chef environments for jobs that deploy applications */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}

/** Returns the name of the authenticated principal, if any */
private Optional<String> userFrom(HttpRequest request) {
    return authorizer.getPrincipalIfAny(request).map(Principal::getName);
}

// Serializes a tenant, optionally including its default-instance applications and
// (best effort) property/contact links resolved from the organization service.
private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
    object.setString("type", tenant.tenantType().name());
    tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id()));
    tenant.getProperty().ifPresent(p -> object.setString("property", p.id()));
    tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString()));
    tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id()));
    Cursor applicationArray = object.setArray("applications");
    if (listApplications) {
        for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) {
            if (application.id().instance().isDefault()) // skip non-default instances
                toSlime(application, applicationArray.addObject(), request);
        }
    }
    tenant.getPropertyId().ifPresent(propertyId -> {
        try {
            object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString());
            object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString());
            object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString());
            Cursor lists = object.setArray("contacts");
            for (List<? extends User> contactList : controller.organization().contactsFor(propertyId)) {
                Cursor list = lists.addArray();
                for (User contact : contactList)
                    list.addString(contact.displayName());
            }
        }
        catch (RuntimeException e) {
            // Organization service is optional enrichment; log and continue without it
            log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e);
        }
    });
}

// Serializes the compact tenant form used in list responses.
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.getId().id());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenant.tenantType().name());
    tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id()));
    tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id()));
    tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id()));
    object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString());
}

/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}

// Adds rotation (BCP) status for prod deployments only.
private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) {
    if ( ! deployment.zone().environment().equals(Environment.prod)) return;
    Cursor bcpStatusObject = object.setObject("bcpStatus");
    bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
}

// Matches the deployment's zone against endpoint names by substring; UNKNOWN when no endpoint matches.
private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
    for (String endpoint : healthStatus.keySet()) {
        if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) {
            return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
        }
    }
    return RotationStatus.UNKNOWN;
}

/** Converts an identifier to its DNS form (underscores become dashes) */
private String toDns(String id) {
    return id.replace('_', '-');
}

// Parses a long, returning the default for null and throwing IllegalArgumentException (-> 400) for junk.
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}

// Serializes one job run (trigger/completion record) of a deployment job.
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.version().toFullString());
    jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}

// Reads at most ~1MB of the stream and parses it as JSON.
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Fix: previously threw a bare 'new RuntimeException()' which dropped the cause entirely,
        // making read failures undiagnosable. Preserve the IOException as the cause.
        throw new RuntimeException(e);
    }
}

// Authorization guard for OpsDB tenants: caller must be a super user or a member of the given group.
private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
    UserId userId = authorizer.getUserId(request);
    if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) {
        throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id()));
    }
}

private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest
request) {
    UserId userId = authorizer.getUserId(request);
    if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) {
        throw new ForbiddenException(
                String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id()));
    }
}

// Returns the named field, failing with IllegalArgumentException (-> 400) when it is missing.
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the named field as a string, empty when missing */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

/** Joins path elements with '/' */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}

// Serializes a deploy (activate) result: revision, package size, prepare log messages and the
// config change actions (restart/refeed) reported by the config server.
private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.getRevisionId().id());
    object.setLong("applicationZipSize", applicationZipSizeBytes);
    Cursor logArray = object.setArray("prepareMessages");
    if (result.getPrepareResponse().log != null) {
        for (Log logMessage : result.getPrepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}

// Serializes a list of services affected by a config change action.
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo serviceInfo : serviceInfoList) {
        Cursor serviceInfoObject = array.addObject();
        serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
        serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
        serviceInfoObject.setString("configId", serviceInfo.configId);
        serviceInfoObject.setString("hostName", serviceInfo.hostName);
    }
}

private void stringsToSlime(List<String> strings, Cursor array) {
    for (String string : strings)
        array.addString(string);
}

// Parses the optional 'screwdriverBuildJob' object; empty when the field is absent.
private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) {
    if ( ! object.valid() ) return Optional.empty();
    Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
    return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision"))));
}

// All three git fields are optional; absent fields become null.
private GitRevision gitRevisionFromSlime(Inspector object) {
    return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                           optional("branch", object).map(GitBranch::new).orElse(null),
                           optional("commit", object).map(GitCommit::new).orElse(null));
}

// Reads the whole stream as one token ("\\A" delimiter); null for an empty stream.
// NOTE(review): Scanner(InputStream) decodes with the platform default charset — confirm whether
// request bodies are guaranteed ASCII/UTF-8 here.
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream).useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}

/** Returns true when the given version is among the versions known to this system */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}

// The request body may name a version; otherwise the current system version is used.
private Version decideDeployVersion(HttpRequest request) {
    String requestVersion = readToString(request.getData());
    if (requestVersion != null)
        return new Version(requestVersion);
    else
        return controller.systemVersion();
}

// Serializes a deployment cost summary, with one entry per cluster.
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long)deploymentCost.getTco());
    object.setLong("waste", (long)deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clustersObject = object.setObject("cluster");
    for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
        toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}

// Serializes cost/utilization details for a single cluster.
// NOTE(review): tco/waste are cast to int here but to long in the deployment-level method above —
// verify the values cannot exceed Integer.MAX_VALUE, or align the casts.
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    object.setLong("tco", (int)clusterCost.getTco());
    object.setLong("waste", (int)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}

// Names the dimension with the highest utilization; cpu wins ties by being checked last.
private static String getResourceName(ClusterUtilization utilization) {
    String name = "cpu";
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) {
        name = "mem";
    } else if (utilization.getDisk() == max) {
        name = "disk";
    } else if (utilization.getDiskBusy() == max) {
        name = "diskbusy";
    }
    return name;
}

}
// REST handler for the /application/v4 API: routes requests by method and path to the
// tenant/application/deployment operations below.
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;
    private final AthenzClientFactory athenzClientFactory;

    @Inject
    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer, AthenzClientFactory athenzClientFactory) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
        this.athenzClientFactory = athenzClientFactory;
    }

    // Long timeout: deployments can take a long time to complete.
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    // Central dispatch and exception-to-HTTP-status translation boundary. RuntimeException is
    // caught last, logged, and mapped to 500; more specific exceptions map to 4xx codes.
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); }
        catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); }
        catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); }
        catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); }
        catch (ConfigServerException e) { return ErrorResponse.from(e); }
        catch (RuntimeException e) {
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    // GET routing table. Route patterns must stay byte-exact.
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // PUT routing table.
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // POST routing table. Note both '.../instance/{instance}' and '.../instance/{instance}/deploy'
    // dispatch to the same deploy method.
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // DELETE routing table.
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    // Full tenant dump, applications included, for each tenant in the system.
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request, true);
        return new SlimeJsonResponse(slime);
    }

    // API root: a resource directory, or the recursive dump when ?recursive is set.
    private HttpResponse root(HttpRequest request) {
        return request.getBooleanProperty("recursive")
                ? recursiveRoot(request)
                : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    // Lists the tenants of the calling (or overridden) user. 'userOverride' bypasses the need to
    // be authenticated.
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            if ( ! application.deploymentJobs().projectId().isPresent()) continue; // no pipeline
            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        response.setArray("brokenTenantPipelines"); // always empty; kept for response compatibility
        return new SlimeJsonResponse(slime);
    }

    // Lists Athenz domains, optionally filtered by the 'prefix' property.
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.id());
        }
        return new SlimeJsonResponse(slime);
    }

    // Lists all known properties with their ids.
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    // Tells the client whether its Bouncer cookie should be refreshed, based on the passthru header.
    private HttpResponse cookieFreshness(HttpRequest request) {
        Slime slime = new Slime();
        String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD);
        slime.setObject().setBool("shouldRefreshCookie", !
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader));
        return new SlimeJsonResponse(slime);
    }

    // Single tenant, with its applications; 404 when unknown.
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(new TenantId((tenantName)))
                .map(tenant -> tenant(tenant, request, true))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    // All applications of a tenant, in compact form.
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    // The 'default' instance of a named application; 404 when unknown.
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        Application application = controller.applications().get(applicationId)
                .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
        Slime slime = new Slime();
        toSlime(slime.setObject(), application, request);
        return new SlimeJsonResponse(slime);
    }

    // Serializes the full application view: in-flight change, deployment job statuses, global
    // rotations with health, deployments per zone, and (best effort) application metrics.
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        if (application.deploying().isPresent()) {
            // The in-flight change is either a version change or an application (revision) change
            Cursor deployingObject = object.setObject("deploying");
            if (application.deploying().get() instanceof Change.VersionChange)
                deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString());
            else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent())
                toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision"));
        }
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                .deploymentOrder()
                .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus().values());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        object.setString("compileVersion", application.compileVersion(controller).toFullString());
        Cursor globalRotationsArray = object.setArray("globalRotations");
        Set<URI> rotations = controller.getRotationUris(application.id());
        // Health is fetched once, from the first rotation endpoint only
        Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty()
                ? Collections.emptyMap()
                : controller.getHealthStatus(rotations.iterator().next().getHost());
        for (URI rotation : rotations)
            globalRotationsArray.addString(rotation.toString());
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                .deploymentOrder()
                .sortBy(application.deploymentSpec().zones(), application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            if ( ! rotations.isEmpty())
                setRotationStatus(deployment, rotationHealthStatus, deploymentObject);
            if (request.getBooleanProperty("recursive")) // expand deployment details inline
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else // just link to the deployment resource
                deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString());
        }
        try {
            MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(application.id());
            Cursor metricsObject = object.setObject("metrics");
            metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality());
            metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality());
        }
        catch (RuntimeException e) {
            // Metrics are best effort; the response is still valid without them.
            // NOTE(review): this binds to log(Level, String, Object) and the message has no {0}
            // placeholder, so the exception detail is dropped — consider passing 'e' instead.
            log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e));
        }
    }

    // --- head of deployment(...) (the rest of this method is below this chunk) ---
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application =
controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + deploymentId.zone().environment() + "/" + deploymentId.zone().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." 
+ deploymentId.applicationId().instance(), request.getUri()).toString()); URI elkUrl = controller.getElkUri(deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zone().environment(), deploymentId.zone().region()); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); controller.applications().get(deploymentId.applicationId()).flatMap(application -> application.deploymentJobs().projectId()) .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); DeploymentCost appCost = deployment.calculateCost(); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    // Monitoring dashboard URI for one deployment, resolved through the zone registry.
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(),
                                                                deploymentId.zone().region(),
                                                                deploymentId.applicationId());
    }

    // Sets the global rotation endpoints of one deployment in or out of service.
    // Caller must be authorized for the tenant; the JSON body must carry a mandatory "reason" field.
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if (!existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);

        // Record who changed the status, why, and when, alongside the new status itself
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = authorizer.getUserId(request).toString();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp);
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        try {
            List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus);
            return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ?
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." 
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, 
serviceName, restPath);
        ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)),
                                                             new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)),
                                                             request.getUri());
        response.setResponse(result, serviceName, restPath);
        return response;
    }

    /**
     * Creates a user tenant named after the currently authenticated user.
     *
     * @throws ForbiddenException if the request carries no authenticated principal
     */
    private HttpResponse createUser(HttpRequest request) {
        Optional<String> username = userFrom(request);
        if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated.");
        try {
            controller.tenants().createUserTenant(username.get());
            return new MessageResponse("Created user '" + username.get() + "'");
        }
        catch (AlreadyExistsException e) {
            // Bug fix: this previously interpolated the Optional itself, yielding
            // "User 'Optional[foo]' already exists" instead of the plain user name.
            return new MessageResponse("User '" + username.get() + "' already exists");
        }
    }

    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( !
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return tenant(updatedTenant, request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthenzDomain::new), optional("propertyId", 
requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthenzDomainAdmin(new AthenzDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return tenant(tenant, request, true); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthenzDomain tenantDomain = new AthenzDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthenzDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthenz(tenantid, tenantDomain, propertyId, property, nToken); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, 
on a given version */
    private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) {
            Application application = controller.applications().require(id);
            // Only one change may be in flight per application at a time
            if (application.deploying().isPresent())
                throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " +
                                                   application.deploying().get() + " is in progress");

            // Version comes from the request body when given, otherwise the current system version
            Version version = decideDeployVersion(request);
            if ( ! systemHasVersion(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + controller.versionStatus().versions());
            controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version));
            return new MessageResponse("Triggered deployment of " + application + " on version " + version);
        }
    }

    /** Cancel any ongoing change for given application */
    private HttpResponse cancelDeploy(String tenantName, String applicationName) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try (Lock lock = controller.applications().lock(id)) {
            Application application = controller.applications().require(id);
            Optional<Change> change = application.deploying();
            if (!change.isPresent()) {
                return new MessageResponse("No deployment in progress for " + application + " at this time");
            }
            controller.applications().deploymentTrigger().cancelChange(id);
            return new MessageResponse("Cancelled " + change.get() + " for " + application);
        }
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.zoneRegistry(), athenzClientFactory); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.getId().id()); object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) { if (request.getBooleanProperty("recursive")) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } tenant.getPropertyId().ifPresent(propertyId -> { try { object.setString("propertyUrl", controller.organization().propertyUri(propertyId).toString()); object.setString("contactsUrl", controller.organization().contactsUri(propertyId).toString()); object.setString("issueCreationUrl", controller.organization().issueCreationUri(propertyId).toString()); Cursor lists = object.setArray("contacts"); for (List<? 
extends User> contactList : controller.organization().contactsFor(propertyId)) { Cursor list = lists.addArray(); for (User contact : contactList) list.addString(contact.displayName()); } } catch (RuntimeException e) { log.log(Level.WARNING, "Error fetching property info for " + tenant + " with propertyId " + propertyId, e); } }); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! 
deployment.zone().environment().equals(Environment.prod)) return;
        Cursor bcpStatusObject = object.setObject("bcpStatus");
        bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
    }

    /** Returns the health of the rotation endpoint matching this deployment's zone, or UNKNOWN if none matches. */
    private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
        for (String endpoint : healthStatus.keySet()) {
            if (endpoint.contains(toDns(deployment.zone().environment().value()))
                && endpoint.contains(toDns(deployment.zone().region().value()))) {
                return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
            }
        }
        return RotationStatus.UNKNOWN;
    }

    /** Rotation endpoint names use '-' where environment/region ids use '_' */
    private String toDns(String id) { return id.replace('_', '-'); }

    private long asLong(String valueOrNull, long defaultWhenNull) {
        if (valueOrNull == null) return defaultWhenNull;
        try {
            return Long.parseLong(valueOrNull);
        }
        catch (NumberFormatException e) {
            // fix: chain the parse failure as the cause instead of dropping it
            throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
        }
    }

    private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
        object.setLong("id", jobRun.id());
        object.setString("version", jobRun.version().toFullString());
        jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
        object.setString("reason", jobRun.reason());
        object.setLong("at", jobRun.at().toEpochMilli());
    }

    /** Reads at most 1 MB of the given stream as JSON. */
    private Slime toSlime(InputStream jsonStream) {
        try {
            byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
            return SlimeUtils.jsonToSlime(jsonBytes);
        }
        catch (IOException e) {
            // fix: the IOException was previously discarded ('throw new RuntimeException()'),
            // which made read failures impossible to diagnose
            throw new RuntimeException(e);
        }
    }

    private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
        UserId userId = authorizer.getUserId(request);
        if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) {
            throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'",
                                                       userId.id(), userGroup.id()));
        }
    }

    private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest
request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthenzDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = 
changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! object.valid() ) return Optional.empty(); Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new); return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision")))); } private GitRevision gitRevisionFromSlime(Inspector object) { return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null), optional("branch", object).map(GitBranch::new).orElse(null), optional("commit", object).map(GitCommit::new).orElse(null)); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } private Version decideDeployVersion(HttpRequest request) { String requestVersion = readToString(request.getData()); if (requestVersion != null) return new Version(requestVersion); else return controller.systemVersion(); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); 
utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } }
The part id is not necessary here: the session id already identifies the transfer, so the EOF method does not need it.
/**
 * Builds the RPC methods this receiver exposes for file distribution: the streaming
 * meta/part/eof protocol and the single-call receiveFile variant.
 *
 * @param handler the object whose receive* callback methods JRT will invoke
 * @return the methods to register with the supervisor
 */
private List<Method> receiveFileMethod(Object handler) {
    List<Method> methods = new ArrayList<>();
    methods.add(new Method(RECEIVE_META_METHOD, "ssl", "ii", handler, "receiveFileMeta")
                        .paramDesc(0, "filereference", "file reference to download")
                        .paramDesc(1, "filename", "filename")
                        .paramDesc(2, "filelength", "length in bytes of file")
                        .returnDesc(0, "ret", "0 if success, 1 otherwise")
                        .returnDesc(1, "session-id", "Session id to be used for this transfer"));
    methods.add(new Method(RECEIVE_PART_METHOD, "siix", "i", handler, "receiveFilePart")
                        .paramDesc(0, "filereference", "file reference to download")
                        .paramDesc(1, "session-id", "Session id to be used for this transfer")
                        .paramDesc(2, "partid", "relative part number starting at zero")
                        .paramDesc(3, "data", "bytes in this part")
                        .returnDesc(0, "ret", "0 if success, 1 otherwise"));
    // The part id is redundant for EOF: the session id already identifies the transfer,
    // so the signature is "silis" (no second 'i' for partid).
    methods.add(new Method(RECEIVE_EOF_METHOD, "silis", "i", handler, "receiveFileEof")
                        .paramDesc(0, "filereference", "file reference to download")
                        .paramDesc(1, "session-id", "Session id to be used for this transfer")
                        .paramDesc(2, "crc-code", "crc code (xxhash64)")
                        .paramDesc(3, "error-code", "Error code. 0 if none")
                        .paramDesc(4, "error-description", "Error description.")
                        .returnDesc(0, "ret", "0 if success, 1 if crc mismatch, 2 otherwise"));
    methods.add(new Method(RECEIVE_METHOD, "ssxlis", "i", handler, "receiveFile")
                        .methodDesc("receive file reference content")
                        .paramDesc(0, "file reference", "file reference to download")
                        .paramDesc(1, "filename", "filename")
                        .paramDesc(2, "content", "array of bytes")
                        .paramDesc(3, "hash", "xx64hash of the file content")
                        .paramDesc(4, "errorcode", "Error code. 0 if none")
                        .paramDesc(5, "error-description", "Error description.")
                        .returnDesc(0, "ret", "0 if success, 1 otherwise"));
    return methods;
}
.paramDesc(2, "partid", "relative part number starting at zero")
/**
 * Creates the set of RPC methods used to receive file reference content:
 * the streaming meta/part/eof sequence plus the single-shot receiveFile call.
 *
 * @param handler the object whose receive* callback methods JRT will invoke
 * @return the methods to register with the supervisor
 */
private List<Method> receiveFileMethod(Object handler) {
    Method receiveMeta =
            new Method(RECEIVE_META_METHOD, "ssl", "ii", handler, "receiveFileMeta")
                    .paramDesc(0, "filereference", "file reference to download")
                    .paramDesc(1, "filename", "filename")
                    .paramDesc(2, "filelength", "length in bytes of file")
                    .returnDesc(0, "ret", "0 if success, 1 otherwise")
                    .returnDesc(1, "session-id", "Session id to be used for this transfer");
    Method receivePart =
            new Method(RECEIVE_PART_METHOD, "siix", "i", handler, "receiveFilePart")
                    .paramDesc(0, "filereference", "file reference to download")
                    .paramDesc(1, "session-id", "Session id to be used for this transfer")
                    .paramDesc(2, "partid", "relative part number starting at zero")
                    .paramDesc(3, "data", "bytes in this part")
                    .returnDesc(0, "ret", "0 if success, 1 otherwise");
    Method receiveEof =
            new Method(RECEIVE_EOF_METHOD, "silis", "i", handler, "receiveFileEof")
                    .paramDesc(0, "filereference", "file reference to download")
                    .paramDesc(1, "session-id", "Session id to be used for this transfer")
                    .paramDesc(2, "crc-code", "crc code (xxhash64)")
                    .paramDesc(3, "error-code", "Error code. 0 if none")
                    .paramDesc(4, "error-description", "Error description.")
                    .returnDesc(0, "ret", "0 if success, 1 if crc mismatch, 2 otherwise");
    Method receiveWhole =
            new Method(RECEIVE_METHOD, "ssxlis", "i", handler, "receiveFile")
                    .methodDesc("receive file reference content")
                    .paramDesc(0, "file reference", "file reference to download")
                    .paramDesc(1, "filename", "filename")
                    .paramDesc(2, "content", "array of bytes")
                    .paramDesc(3, "hash", "xx64hash of the file content")
                    .paramDesc(4, "errorcode", "Error code. 0 if none")
                    .paramDesc(5, "error-description", "Error description.")
                    .returnDesc(0, "ret", "0 if success, 1 otherwise");

    List<Method> methods = new ArrayList<>();
    methods.add(receiveMeta);
    methods.add(receivePart);
    methods.add(receiveEof);
    methods.add(receiveWhole);
    return methods;
}
/**
 * Receives file references over JRT, verifies the xxhash64 of the received content
 * and writes the file into the download directory, notifying the downloader when done.
 * Registers its RPC methods with the supervisor on construction.
 *
 * NOTE(review): JRT may invoke the handler methods from its own threads -- confirm
 * whether concurrent invocations are possible before adding mutable state here.
 */
class FileReceiver {

    private final static Logger log = Logger.getLogger(FileReceiver.class.getName());

    // RPC method names: single-call variant plus the streaming meta/part/eof protocol
    private final static String RECEIVE_METHOD = "filedistribution.receiveFile";
    private final static String RECEIVE_META_METHOD = "filedistribution.receiveFileMeta";
    private final static String RECEIVE_PART_METHOD = "filedistribution.receiveFilePart";
    private final static String RECEIVE_EOF_METHOD = "filedistribution.receiveFileEof";

    private final Supervisor supervisor;
    private final FileReferenceDownloader downloader;
    private final File downloadDirectory;
    // Hash used to verify integrity of received content
    private final XXHash64 hasher = XXHashFactory.fastestInstance().hash64();

    public FileReceiver(Supervisor supervisor, FileReferenceDownloader downloader, File downloadDirectory) {
        this.supervisor = supervisor;
        this.downloader = downloader;
        this.downloadDirectory = downloadDirectory;
        registerMethods(); // exposes the receive* RPC methods immediately
    }

    private void registerMethods() {
        receiveFileMethod(this).forEach((method) -> supervisor.addMethod(method));
    }

    /** RPC entry point for the single-call variant: unpacks the request and delegates. */
    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFile(Request req) {
        FileReference fileReference = new FileReference(req.parameters().get(0).asString());
        String filename = req.parameters().get(1).asString();
        byte[] content = req.parameters().get(2).asData();
        long xxhash = req.parameters().get(3).asInt64();
        int errorCode = req.parameters().get(4).asInt32();
        String errorDescription = req.parameters().get(5).asString();
        if (errorCode == 0) {
            log.log(LogLevel.INFO, "Receiving file reference '" + fileReference.value() + "'");
            receiveFile(fileReference, filename, content, xxhash);
            req.returnValues().add(new Int32Value(0)); // 0 = success
        } else {
            log.log(LogLevel.WARNING, "Receiving file reference '" + fileReference.value() + "' failed: " + errorDescription);
            req.returnValues().add(new Int32Value(1)); // 1 = sender reported an error
        }
    }

    /**
     * Verifies the content hash, writes the file under downloadDirectory/&lt;fileReference&gt;/
     * and notifies the downloader. Throws RuntimeException on hash mismatch or write failure.
     */
    void receiveFile(FileReference fileReference, String filename, byte[] content, long xxHash) {
        long xxHashFromContent = hasher.hash(ByteBuffer.wrap(content), 0);
        if (xxHashFromContent != xxHash)
            throw new RuntimeException("xxhash from content (" + xxHashFromContent + ") is not equal to xxhash in request (" + xxHash + ")");
        File fileReferenceDir = new File(downloadDirectory, fileReference.value());
        try {
            Files.createDirectories(fileReferenceDir.toPath());
            File file = new File(fileReferenceDir, filename);
            log.log(LogLevel.INFO, "Writing data to " + file.getAbsolutePath());
            Files.write(file.toPath(), content);
            downloader.completedDownloading(fileReference, file);
        } catch (IOException e) {
            log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage());
            throw new RuntimeException("Failed writing file: ", e);
        }
    }

    // The three handlers below are stubs for the streaming protocol: they only log the call.

    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFileMeta(Request req) {
        log.info("Received method call '" + req.methodName() + "' with parameters : " + req.parameters());
    }

    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFilePart(Request req) {
        log.info("Received method call '" + req.methodName() + "' with parameters : " + req.parameters());
    }

    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFileEof(Request req) {
        log.info("Received method call '" + req.methodName() + "' with parameters : " + req.parameters());
    }
}
/**
 * Receives file references over JRT, verifies the xxhash64 of the received content
 * and writes the file into the download directory, notifying the downloader when done.
 * Registers its RPC methods with the supervisor on construction.
 *
 * NOTE(review): JRT may invoke the handler methods from its own threads -- confirm
 * whether concurrent invocations are possible before adding mutable state here.
 */
class FileReceiver {

    private final static Logger log = Logger.getLogger(FileReceiver.class.getName());

    // RPC method names: single-call variant plus the streaming meta/part/eof protocol
    private final static String RECEIVE_METHOD = "filedistribution.receiveFile";
    private final static String RECEIVE_META_METHOD = "filedistribution.receiveFileMeta";
    private final static String RECEIVE_PART_METHOD = "filedistribution.receiveFilePart";
    private final static String RECEIVE_EOF_METHOD = "filedistribution.receiveFileEof";

    private final Supervisor supervisor;
    private final FileReferenceDownloader downloader;
    private final File downloadDirectory;
    // Hash used to verify integrity of received content
    private final XXHash64 hasher = XXHashFactory.fastestInstance().hash64();

    public FileReceiver(Supervisor supervisor, FileReferenceDownloader downloader, File downloadDirectory) {
        this.supervisor = supervisor;
        this.downloader = downloader;
        this.downloadDirectory = downloadDirectory;
        registerMethods(); // exposes the receive* RPC methods immediately
    }

    private void registerMethods() {
        receiveFileMethod(this).forEach((method) -> supervisor.addMethod(method));
    }

    /** RPC entry point for the single-call variant: unpacks the request and delegates. */
    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFile(Request req) {
        FileReference fileReference = new FileReference(req.parameters().get(0).asString());
        String filename = req.parameters().get(1).asString();
        byte[] content = req.parameters().get(2).asData();
        long xxhash = req.parameters().get(3).asInt64();
        int errorCode = req.parameters().get(4).asInt32();
        String errorDescription = req.parameters().get(5).asString();
        if (errorCode == 0) {
            log.log(LogLevel.INFO, "Receiving file reference '" + fileReference.value() + "'");
            receiveFile(fileReference, filename, content, xxhash);
            req.returnValues().add(new Int32Value(0)); // 0 = success
        } else {
            log.log(LogLevel.WARNING, "Receiving file reference '" + fileReference.value() + "' failed: " + errorDescription);
            req.returnValues().add(new Int32Value(1)); // 1 = sender reported an error
        }
    }

    /**
     * Verifies the content hash, writes the file under downloadDirectory/&lt;fileReference&gt;/
     * and notifies the downloader. Throws RuntimeException on hash mismatch or write failure.
     */
    void receiveFile(FileReference fileReference, String filename, byte[] content, long xxHash) {
        long xxHashFromContent = hasher.hash(ByteBuffer.wrap(content), 0);
        if (xxHashFromContent != xxHash)
            throw new RuntimeException("xxhash from content (" + xxHashFromContent + ") is not equal to xxhash in request (" + xxHash + ")");
        File fileReferenceDir = new File(downloadDirectory, fileReference.value());
        try {
            Files.createDirectories(fileReferenceDir.toPath());
            File file = new File(fileReferenceDir, filename);
            log.log(LogLevel.INFO, "Writing data to " + file.getAbsolutePath());
            Files.write(file.toPath(), content);
            downloader.completedDownloading(fileReference, file);
        } catch (IOException e) {
            log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage());
            throw new RuntimeException("Failed writing file: ", e);
        }
    }

    // The three handlers below are stubs for the streaming protocol: they only log the call.

    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFileMeta(Request req) {
        log.info("Received method call '" + req.methodName() + "' with parameters : " + req.parameters());
    }

    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFilePart(Request req) {
        log.info("Received method call '" + req.methodName() + "' with parameters : " + req.parameters());
    }

    @SuppressWarnings({"UnusedDeclaration"})
    public final void receiveFileEof(Request req) {
        log.info("Received method call '" + req.methodName() + "' with parameters : " + req.parameters());
    }
}
How can the first job not be system test, though? Isn't the deployment spec made such that it always is, unless it isEmpty()?
/** Finds jobs that can and should run for this application but currently are not running, and triggers them. */
private void triggerReadyJobs(LockedApplication application) {
    // Nothing to do unless a change is in progress
    if ( ! application.deploying().isPresent()) return;
    List<JobType> jobs = order.jobsFrom(application.deploymentSpec());
    // System test is started directly when it is the first job in the deployment order,
    // since it has no predecessor job to chain from
    if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
        JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
        if (application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange) application.deploying().get()).version();
            // (Re)trigger system test unless its last triggering was a success on the target version
            if (systemTestStatus == null
                || ! systemTestStatus.lastTriggered().isPresent()
                || ! systemTestStatus.isSuccess()
                || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }
        else {
            // Application change: trigger system test when the component job has a newer change available
            JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
            // NOTE(review): componentStatus may be null when no component job has run yet, and
            // changesAvailable dereferences its 'previous' argument -- confirm this cannot NPE here.
            if (changesAvailable(application, componentStatus, systemTestStatus)) {
                application = trigger(JobType.systemTest, application, false, "Available change in component");
                controller.applications().store(application);
            }
        }
    }
    // For every job which has run, trigger its successors in the deployment order
    // when they have a change available which they have not yet succeeded on
    for (JobType jobType : jobs) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null) continue; // job has never run; nothing to chain from
        if (jobStatus.isRunning(jobTimeoutLimit())) continue; // let the running job finish first
        List<JobType> nextToTrigger = new ArrayList<>();
        for (JobType nextJobType : order.nextAfter(jobType, application)) {
            JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
            if (changesAvailable(application, jobStatus, nextStatus))
                nextToTrigger.add(nextJobType);
        }
        application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
        controller.applications().store(application);
    }
}
if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
/** Finds jobs that can and should run for this application but currently are not running, and triggers them. */
private void triggerReadyJobs(LockedApplication application) {
    // Nothing to do unless a change is in progress
    if ( ! application.deploying().isPresent()) return;
    List<JobType> jobs = order.jobsFrom(application.deploymentSpec());
    // System test is started directly when it is the first job in the deployment order,
    // since it has no predecessor job to chain from
    if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
        JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
        if (application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange) application.deploying().get()).version();
            // (Re)trigger system test unless its last triggering was a success on the target version
            if (systemTestStatus == null
                || ! systemTestStatus.lastTriggered().isPresent()
                || ! systemTestStatus.isSuccess()
                || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }
        else {
            // Application change: trigger system test when the component job has a newer change available
            JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
            // NOTE(review): componentStatus may be null when no component job has run yet, and
            // changesAvailable dereferences its 'previous' argument -- confirm this cannot NPE here.
            if (changesAvailable(application, componentStatus, systemTestStatus)) {
                application = trigger(JobType.systemTest, application, false, "Available change in component");
                controller.applications().store(application);
            }
        }
    }
    // For every job which has run, trigger its successors in the deployment order
    // when they have a change available which they have not yet succeeded on
    for (JobType jobType : jobs) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null) continue; // job has never run; nothing to chain from
        if (jobStatus.isRunning(jobTimeoutLimit())) continue; // let the running job finish first
        List<JobType> nextToTrigger = new ArrayList<>();
        for (JobType nextJobType : order.nextAfter(jobType, application)) {
            JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
            if (changesAvailable(application, jobStatus, nextStatus))
                nextToTrigger.add(nextJobType);
        }
        application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
        controller.applications().store(application);
    }
}
/**
 * Schedules deployment jobs in the build system in response to job completions,
 * and keeps each application's currently deploying change in sync with what is scheduled.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // The main system gets a longer timeout than test systems
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    // A new application revision was produced; start deploying it now or record it as outstanding
                    if (acceptNewRevisionNow(application)) {
                        // Keep an ongoing version change as the deploying change; otherwise mark an application change
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else { // postpone the new revision until the current change has completed
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The change has reached all production zones: clear it
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                .filter(JobType::isProduction)
                .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not, across all (non pull request) applications.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // The application may have been removed between listing and locking; skip if so
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();
        if ( ! previous.lastSuccess().isPresent() &&
             ! productionUpgradeHasSucceededFor(previous, change)) return false;
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) )
                return false; // version is outdated; a newer version change will be triggered
            // NOTE(review): 'next' is dereferenced here but only null-checked below --
            // this NPEs when next is null and a version change is in progress. Confirm and
            // consider hoisting the null check above this branch.
            if (isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false; // the zone is already on a newer version; don't downgrade
        }
        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        // NOTE(review): nextSuccess.revision() may be absent here; .get() would then throw -- verify
        if (previousSuccess.revision().isPresent() &&  ! previousSuccess.revision().get().equals(nextSuccess.revision().get()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false); // this change consumes the outstanding one
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing change of the given application, and removes its queued jobs
     *
     * @param applicationId the application to cancel the current change for
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether the given job type competes for constrained (test) capacity */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing() is assumed present whenever jobStatus exists here
        // (this is only called after a failure report); confirm, as .get() would throw otherwise
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Retry the job if it failed recently
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Skip triggering of jobs for zones where the application should not be deployed
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in turn; skips production jobs if one is already running */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application; // we are passed null when the last job has been reached
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        // Ignore applications that are not associated with a project
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; // avoid downgrades
        }
        return true;
    }

    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange) ) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job type deploys to a production zone in this system */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // arbitrary
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly produced application revision can start deploying immediately */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // more changes are ok
        if ( application.deploymentJobs().hasFailures()) return true; // allow changes to fix upgrade problems
        if ( application.isBlocked(clock.instant())) return true; // allow testing changes while upgrade blocked (debatable)
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
/**
 * Triggers deployment jobs: reacts to job completions by scheduling follow-up jobs,
 * and periodically triggers jobs which are ready to run for the change each
 * application is currently rolling out.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * @param controller the controller owning this trigger
     * @param curator backing store for the polled build system job queue
     * @param clock clock used for all timeout decisions; injected for testability
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // The main system gets a generous timeout, as production jobs may legitimately run for hours.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Keep an ongoing version change as-is; otherwise start rolling out the new revision
                        if ( ! (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Remember the new revision as outstanding instead of rolling it out now
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // The application may have been removed between listing and locking; skip it if so
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     *
     * @param previous the job gating the next one; may be null if it has never run (treated as no change available)
     * @param next the candidate follow-up job status; may be null if it has never run
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        // Callers pass jobStatus().get(...) results directly, so previous may be null: no success to propagate
        if (previous == null) return false;
        Change change = application.deploying().get();
        if ( ! previous.lastSuccess().isPresent() &&
             ! productionUpgradeHasSucceededFor(previous, change)) return false;
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            if ( ! targetVersion.equals(previous.lastSuccess().get().version()))
                return false; // version is outdated; a newer iteration of this change will trigger instead
            // Guard against null: 'next' may never have run. (The unguarded next.type() call could NPE here.)
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false; // Don't downgrade production
        }
        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        // A revision present upstream but absent in next counts as a change available (avoids get() on empty)
        if (previousSuccess.revision().isPresent() &&
            ( ! nextSuccess.revision().isPresent() || ! previousSuccess.revision().get().equals(nextSuccess.revision().get())))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version()
                                                                          : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether the job type runs in a zone where shared capacity may run out */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // firstFailing is empty when the job is not currently failing; guard against NoSuchElementException
        return jobStatus != null
               && jobStatus.firstFailing().isPresent()
               && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                                                        .flatMap(JobStatus::jobError)
                                                        .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Retry for up to 15 minutes after the job first ran out of capacity
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                          .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in order; production jobs are skipped while another production job runs */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; // never downgrade
        }
        return true;
    }

    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange)) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in a zone of this system
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly produced revision may start rolling out now, rather than being queued as outstanding */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if (application.deploying().get() instanceof Change.ApplicationChange) return true; // more application changes fold in
        if (application.deploymentJobs().hasFailures()) return true; // allow changes which may fix the failure
        if (application.isBlocked(clock.instant())) return true; // allow testing changes while upgrade blocked
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
Yes, but that code lives far away and the behavior is not explicitly guaranteed by the API, so I am not comfortable relying on it implicitly here.
/**
 * Triggers any jobs which are ready to run for this application's ongoing change.
 * First ensures the initial (system test) job has been triggered on the change,
 * then walks every job in deployment order and triggers each successor for which
 * a change (new version and/or revision) is available from its predecessor.
 * Stores the application after each triggering so progress survives interruption.
 */
private void triggerReadyJobs(LockedApplication application) {
    if ( ! application.deploying().isPresent()) return;
    List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

    // Urgh. Empty spec means unknown spec. Should we write it at deploy time instead of at submit time?
    // Kick off the chain only when system test is the entry point of the job order.
    if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
        JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
        if (application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange) application.deploying().get()).version();
            // (Re)trigger system test unless it has already been triggered and succeeded on the target version
            if (systemTestStatus == null
                    || ! systemTestStatus.lastTriggered().isPresent()
                    || ! systemTestStatus.isSuccess()
                    || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }
        else {
            // Application (revision) change: trigger system test when component has produced something newer.
            // NOTE(review): componentStatus and systemTestStatus may be null here; changesAvailable
            // dereferences 'previous' and (for version changes) 'next.type()' — confirm null-safety.
            JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
            if (changesAvailable(application, componentStatus, systemTestStatus)) {
                application = trigger(JobType.systemTest, application, false, "Available change in component");
                controller.applications().store(application);
            }
        }
    }

    // Propagate the change down the deployment order, one predecessor at a time.
    for (JobType jobType : jobs) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null) continue; // job has never run; nothing to propagate from it
        if (jobStatus.isRunning(jobTimeoutLimit())) continue; // wait for it to finish (or time out)

        // Collect all successors for which this job's result is news, then trigger them together
        List<JobType> nextToTrigger = new ArrayList<>();
        for (JobType nextJobType : order.nextAfter(jobType, application)) {
            JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
            if (changesAvailable(application, jobStatus, nextStatus))
                nextToTrigger.add(nextJobType);
        }
        application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
        controller.applications().store(application);
    }
}
if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
/**
 * Triggers the jobs which are ready to run for this application's current change:
 * makes sure the system test entry point has been triggered on the change, then
 * propagates the change along the deployment order wherever a newer result is available.
 */
private void triggerReadyJobs(LockedApplication application) {
    if ( ! application.deploying().isPresent()) return;

    List<JobType> jobPath = order.jobsFrom(application.deploymentSpec());

    boolean startsWithSystemTest = ! jobPath.isEmpty() && jobPath.get(0).equals(JobType.systemTest);
    if (startsWithSystemTest) {
        JobStatus systemTest = application.deploymentJobs().jobStatus().get(JobType.systemTest);
        Change change = application.deploying().get();
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange) change).version();
            // Nothing to do if system test already ran successfully on the exact target version
            boolean doneOnTarget = systemTest != null
                                   && systemTest.lastTriggered().isPresent()
                                   && systemTest.isSuccess()
                                   && systemTest.lastTriggered().get().version().equals(targetVersion);
            if ( ! doneOnTarget) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + targetVersion);
                controller.applications().store(application);
            }
        }
        else {
            // Revision change: start the chain when component has produced a newer result
            JobStatus component = application.deploymentJobs().jobStatus().get(JobType.component);
            if (changesAvailable(application, component, systemTest)) {
                application = trigger(JobType.systemTest, application, false, "Available change in component");
                controller.applications().store(application);
            }
        }
    }

    // Walk each completed job and trigger its successors which have not yet seen its result
    for (JobType jobType : jobPath) {
        JobStatus current = application.deploymentJobs().jobStatus().get(jobType);
        if (current == null) continue;
        if (current.isRunning(jobTimeoutLimit())) continue;

        List<JobType> readySuccessors = new ArrayList<>();
        for (JobType successor : order.nextAfter(jobType, application)) {
            JobStatus successorStatus = application.deploymentJobs().jobStatus().get(successor);
            if (changesAvailable(application, current, successorStatus))
                readySuccessors.add(successor);
        }
        application = trigger(readySuccessors, application, "Available change in " + jobType.jobName());
        controller.applications().store(application);
    }
}
/**
 * Triggers deployment jobs: reacts to job completions by scheduling follow-up jobs,
 * and periodically triggers jobs which are ready to run for the change each
 * application is currently rolling out.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * @param controller the controller owning this trigger
     * @param curator backing store for the polled build system job queue
     * @param clock clock used for all timeout decisions; injected for testability
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // The main system gets a generous timeout, as production jobs may legitimately run for hours.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Keep an ongoing version change as-is; otherwise start rolling out the new revision
                        if ( ! (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Remember the new revision as outstanding instead of rolling it out now
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // The application may have been removed between listing and locking; skip it if so
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     *
     * @param previous the job gating the next one; may be null if it has never run (treated as no change available)
     * @param next the candidate follow-up job status; may be null if it has never run
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        // Callers pass jobStatus().get(...) results directly, so previous may be null: no success to propagate
        if (previous == null) return false;
        Change change = application.deploying().get();
        if ( ! previous.lastSuccess().isPresent() &&
             ! productionUpgradeHasSucceededFor(previous, change)) return false;
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            if ( ! targetVersion.equals(previous.lastSuccess().get().version()))
                return false; // version is outdated; a newer iteration of this change will trigger instead
            // Guard against null: 'next' may never have run. (The unguarded next.type() call could NPE here.)
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false; // Don't downgrade production
        }
        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        // A revision present upstream but absent in next counts as a change available (avoids get() on empty)
        if (previousSuccess.revision().isPresent() &&
            ( ! nextSuccess.revision().isPresent() || ! previousSuccess.revision().get().equals(nextSuccess.revision().get())))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version()
                                                                          : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether the job type runs in a zone where shared capacity may run out */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // firstFailing is empty when the job is not currently failing; guard against NoSuchElementException
        return jobStatus != null
               && jobStatus.firstFailing().isPresent()
               && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                                                        .flatMap(JobStatus::jobError)
                                                        .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Retry for up to 15 minutes after the job first ran out of capacity
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                          .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in order; production jobs are skipped while another production job runs */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; // never downgrade
        }
        return true;
    }

    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange)) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in a zone of this system
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly produced revision may start rolling out now, rather than being queued as outstanding */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if (application.deploying().get() instanceof Change.ApplicationChange) return true; // more application changes fold in
        if (application.deploymentJobs().hasFailures()) return true; // allow changes which may fix the failure
        if (application.isBlocked(clock.instant())) return true; // allow testing changes while upgrade blocked
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
/**
 * Responsible for scheduling deployment jobs in a build system and keeping
 * application change/upgrade state in sync with what is happening in those jobs.
 *
 * Not thread-safe by itself; all mutating operations take the per-application
 * lock through {@code applications().lock(...)}.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a deployment trigger.
     *
     * @param controller the controller owning application state
     * @param curator    persistence backing the polled build system queue
     * @param clock      clock used for all timeout/blocking decisions (injected for testability)
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Main (production) systems get a much longer grace period before a job is considered hung.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Set this as the change to deploy, unless a version change is already rolling out;
                        // a version change keeps precedence over the new revision.
                        if ( ! (application.deploying().isPresent() &&
                                (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else { // postpone the revision; remember it as outstanding
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The last job of this change just completed; clear the change.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true; // no change in progress -> trivially complete
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed between the list and the lock
                triggerReadyJobs(lockedApplication.get()); // per-application overload, defined elsewhere in this file
            }
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next.
     *
     * @param application the application owning both job statuses
     * @param previous    status of the job that feeds into {@code next}; may be null when that job never ran
     * @param next        status of the candidate follow-up job; may be null when that job never ran
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        if (previous == null) return false; // predecessor never ran: nothing can have become available
        Change change = application.deploying().get();

        // The predecessor must have succeeded at least once, unless we are upgrading and it has
        // already succeeded on the target version (see productionUpgradeHasSucceededFor).
        if ( ! previous.lastSuccess().isPresent() &&
             ! productionUpgradeHasSucceededFor(previous, change)) return false;

        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange) change).version();
            if ( ! targetVersion.equals(previous.lastSuccess().get().version()))
                return false; // the predecessor succeeded on some other version: not this upgrade
            // Fix: guard next != null here — the original dereferenced next.type() before its own
            // null check below, throwing NPE whenever the follow-up job had no status yet.
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false; // never downgrade a production zone
        }

        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        // Fix: compare the revision Optionals directly — calling nextSuccess.revision().get()
        // threw NoSuchElementException when the successor's last success carried no revision.
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange
                                          ? "Upgrading to " + ((Change.VersionChange) change).version()
                                          : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id()); // drop queued but not yet started jobs
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** System and staging test zones have limited capacity, so failures there may simply mean "try later". */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // Fix: guard firstFailing().isPresent() — the original called get() unconditionally,
        // which throws when a status exists without a recorded failure.
        return jobStatus != null
               && jobStatus.firstFailing().isPresent()
               && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Retry for at most 15 minutes after the capacity failure first appeared.
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Skip production zones the deployment spec does not list.
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType     the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first       whether to put the job at the front of the build system queue (or the back)
     * @param reason      describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in order, unless a production job is among them while one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType     the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first       whether to trigger the job before other jobs
     * @param force       true to disable checks which should normally prevent this triggering from happening
     * @param reason      describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested",
                                      jobType, application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        // Are we able to trigger this job externally at all?
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange) application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange)) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange) change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job targets a production zone in this system. */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in this system
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly completed revision may start rolling out now, rather than being queued as outstanding. */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if (application.deploying().get() instanceof Change.ApplicationChange) return true; // replace the previous revision
        if (application.deploymentJobs().hasFailures()) return true; // the pipeline is stuck; the new revision may fix it
        if (application.isBlocked(clock.instant())) return true; // not deploying anyway; accept the change
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
Fair enough.
/**
 * Finds and triggers jobs that can and should run for the given application but currently are not.
 *
 * First, the pipeline's entry job (system test) is triggered when the current change has not yet
 * entered it; then every completed, non-running job has its successors triggered when they have
 * not yet seen the change produced by their predecessor.
 *
 * The application is re-stored after each triggering so progress survives a crash mid-loop.
 *
 * @param application the locked application to inspect and possibly trigger jobs for
 */
private void triggerReadyJobs(LockedApplication application) {
    if ( ! application.deploying().isPresent()) return; // no change in progress: nothing to trigger
    List<JobType> jobs = order.jobsFrom(application.deploymentSpec());
    // Start the pipeline only when system test is actually its first step.
    if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
        JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
        if (application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange) application.deploying().get()).version();
            // (Re)trigger system test unless it has already succeeded on exactly the target version.
            if (systemTestStatus == null
                || ! systemTestStatus.lastTriggered().isPresent()
                || ! systemTestStatus.isSuccess()
                || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }
        else {
            // Application (revision) change: trigger system test when component has produced something newer.
            JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
            // NOTE(review): componentStatus may be null here; changesAvailable dereferences
            // previous.lastSuccess() — confirm it tolerates a null previous.
            if (changesAvailable(application, componentStatus, systemTestStatus)) {
                application = trigger(JobType.systemTest, application, false, "Available change in component");
                controller.applications().store(application);
            }
        }
    }
    // Propagate the change through the rest of the pipeline.
    for (JobType jobType : jobs) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null) continue; // job has never run: nothing to propagate from it
        if (jobStatus.isRunning(jobTimeoutLimit())) continue; // wait for it to finish (or time out)
        List<JobType> nextToTrigger = new ArrayList<>();
        for (JobType nextJobType : order.nextAfter(jobType, application)) {
            JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
            if (changesAvailable(application, jobStatus, nextStatus))
                nextToTrigger.add(nextJobType);
        }
        application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
        controller.applications().store(application);
    }
}
if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
/**
 * Triggers the jobs which are ready to run for this application's current change:
 * the pipeline entry job if the change has not yet entered the pipeline, and the
 * successors of every finished job whose change those successors have not yet seen.
 *
 * @param application the locked application to trigger ready jobs for
 */
private void triggerReadyJobs(LockedApplication application) {
    if ( ! application.deploying().isPresent()) return;

    List<JobType> jobSequence = order.jobsFrom(application.deploymentSpec());

    boolean pipelineStartsWithSystemTest = ! jobSequence.isEmpty()
                                           && jobSequence.get(0).equals(JobType.systemTest);
    if (pipelineStartsWithSystemTest) {
        JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
        if (application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange) application.deploying().get()).version();
            boolean systemTestDoneOnTarget = systemTestStatus != null
                                             && systemTestStatus.lastTriggered().isPresent()
                                             && systemTestStatus.isSuccess()
                                             && systemTestStatus.lastTriggered().get().version().equals(target);
            if ( ! systemTestDoneOnTarget) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }
        else {
            JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
            if (changesAvailable(application, componentStatus, systemTestStatus)) {
                application = trigger(JobType.systemTest, application, false, "Available change in component");
                controller.applications().store(application);
            }
        }
    }

    // Walk the pipeline: each finished job may release one or more successors.
    for (JobType currentJob : jobSequence) {
        JobStatus currentStatus = application.deploymentJobs().jobStatus().get(currentJob);
        if (currentStatus == null) continue;
        if (currentStatus.isRunning(jobTimeoutLimit())) continue;

        List<JobType> readySuccessors = new ArrayList<>();
        for (JobType successor : order.nextAfter(currentJob, application)) {
            JobStatus successorStatus = application.deploymentJobs().jobStatus().get(successor);
            if (changesAvailable(application, currentStatus, successorStatus))
                readySuccessors.add(successor);
        }
        application = trigger(readySuccessors, application, "Available change in " + currentJob.jobName());
        controller.applications().store(application);
    }
}
/**
 * Schedules deployment jobs in the build system and keeps application change
 * state (upgrades and new revisions) in step with job completions.
 * All mutations are performed under the per-application lock.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a deployment trigger.
     *
     * @param controller the controller owning application state
     * @param curator    persistence backing the polled build system queue
     * @param clock      clock used for timeout and block-window decisions (injected for testability)
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // The main (production) system gets a much longer grace period before a job is considered hung.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Start deploying the new revision, unless a version change is already in
                        // progress — the version change keeps precedence.
                        if ( ! ( application.deploying().isPresent() &&
                                 (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Postpone the revision; remember that a change is waiting.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The last production job of the current change succeeded; clear the change.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true; // no change in progress -> trivially complete
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed between listing and locking
                triggerReadyJobs(lockedApplication.get()); // per-application overload, defined elsewhere in this file
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();

        // Predecessor must have succeeded, unless upgrading and it already succeeded on the target version.
        if ( ! previous.lastSuccess().isPresent() &&
             ! productionUpgradeHasSucceededFor(previous, change)) return false;

        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) )
                return false; // the predecessor succeeded on some other version
            // NOTE(review): next.type() is dereferenced here, but next is only null-checked
            // below — this NPEs when the follow-up job has no status yet. Confirm and fix.
            if (isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false; // never downgrade a production zone
        }

        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        // NOTE(review): nextSuccess.revision().get() is called unguarded — throws when the
        // successor's last success carries no revision while the predecessor's does. Verify.
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id()); // drop queued but not yet started jobs
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** System and staging test zones have limited capacity; failures there may just mean "try later". */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() is unguarded — confirm a status always has a
        // first failure on this (failure) code path.
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Retry for at most 15 minutes after the capacity failure first appeared.
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Skip production zones the deployment spec does not list.
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs, unless a production job is among them while one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested",
                                      jobType, application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        // Without a project id we cannot address the job in the external build system at all.
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange) ) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job targets a production zone in this system. */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in this system
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly completed revision may start rolling out now, rather than being queued as outstanding. */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // replace the previous revision
        if ( application.deploymentJobs().hasFailures()) return true; // pipeline is stuck; the new revision may fix it
        if ( application.isBlocked(clock.instant())) return true; // not deploying anyway; accept the change
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
/**
 * Responsible for triggering deployment jobs in the right order for applications,
 * both as reactions to job completions and periodically for jobs that are ready to run.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a trigger backed by the given controller and curator storage.
     * The job timeout is 12 hours in the main system and 1 hour elsewhere.
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            // Record the completion before deciding what to do next.
            application = application.withJobCompletion(report, clock.instant(), controller);
            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    // A new application revision arrived: start deploying it now, unless a
                    // version change is already in flight, in which case just note it as outstanding.
                    if (acceptNewRevisionNow(application)) {
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // All production zones done: clear the in-flight change.
                    application = application.withDeploying(Optional.empty());
                }
            }
            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");
            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            // Re-acquire each application under its lock; it may have disappeared since listing.
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed since listing
                // NOTE(review): the single-application overload of triggerReadyJobs is not visible
                // in this excerpt — presumably defined elsewhere in this class; verify.
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();
        // previous must either have a recorded success, or (for upgrades) have succeeded
        // earlier on the exact version being deployed.
        if ( ! previous.lastSuccess().isPresent() &&
             ! productionUpgradeHasSucceededFor(previous, change)) return false;
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) )
                return false; // previous completed, but on a different version than the target
            // NOTE(review): next is dereferenced here but only null-checked below —
            // this looks like a potential NullPointerException when next is null; verify.
            if (isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false; // do not downgrade a zone already on a newer version
        }
        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true; // next never succeeded: anything is news to it
        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            // Only one change may be in progress, unless the current one is failing.
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    /** Convenience accessor for the application repository. */
    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether this job type is prone to transient out-of-capacity failures. */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() assumes a failing run is always recorded when this
        // is called (it is only reached after a failure report) — TODO confirm.
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                                                        .flatMap(JobStatus::jobError)
                                                        .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Keep retrying for up to 15 minutes after the first out-of-capacity failure.
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                          .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Production jobs are only triggered for zones listed in the deployment spec.
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in order, skipping production jobs if one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s",
                               jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            // Never trigger a job in a zone already running a newer version (no downgrades).
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    /** Returns whether any production job for this application is currently (believed to be) running. */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange) ) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job type targets a production zone in this system. */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly arrived application revision may start deploying right away. */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // in progress: replace it
        if ( application.deploymentJobs().hasFailures()) return true; // new revision might fix failures
        if ( application.isBlocked(clock.instant())) return true; // not deploying now anyway
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
This is a nice one-liner!
/**
 * Replaces the active keystore, optionally with a keystore password.
 *
 * @param keyStore the new keystore to install
 * @param password the keystore password, or null if the keystore is not password protected
 */
public void updateKeyStore(KeyStore keyStore, String password) {
    updateKeyStore(sslContextFactory -> {
        sslContextFactory.setKeyStore(keyStore);
        if (password != null) {
            // Bug fix: the supplied password must be applied; the original passed null here,
            // discarding the caller's password inside the non-null check.
            sslContextFactory.setKeyStorePassword(password);
        }
    });
}
if (password != null) {
/**
 * Replaces the active keystore, optionally with a keystore password.
 *
 * @param keyStore the new keystore to install
 * @param password the keystore password, or null if the keystore is not password protected
 */
public void updateKeyStore(KeyStore keyStore, String password) {
    updateKeyStore(sslContextFactory -> {
        sslContextFactory.setKeyStore(keyStore);
        if (password != null) {
            // Bug fix: apply the supplied password instead of null; passing null inside a
            // password != null guard silently dropped the caller's password.
            sslContextFactory.setKeyStorePassword(password);
        }
    });
}
/**
 * Default {@link SslKeyStoreContext} implementation that applies keystore updates
 * to a Jetty {@link SslContextFactory} via its reload mechanism.
 */
class DefaultSslKeyStoreContext implements SslKeyStoreContext {

    private final SslContextFactory sslContextFactory;

    public DefaultSslKeyStoreContext(SslContextFactory sslContextFactory) {
        this.sslContextFactory = sslContextFactory;
    }

    @Override
    public void updateKeyStore(KeyStore keyStore) {
        // No password: delegate with null, which the two-arg overload treats as "not protected".
        updateKeyStore(keyStore, null);
    }

    // Fix: restored the two-arg overload the one-arg delegation targets, and removed the
    // duplicated @Override annotation (repeating a non-@Repeatable annotation is a compile error).
    @Override
    public void updateKeyStore(KeyStore keyStore, String password) {
        updateKeyStore(sslContextFactory -> {
            sslContextFactory.setKeyStore(keyStore);
            if (password != null) {
                sslContextFactory.setKeyStorePassword(password);
            }
        });
    }

    @Override
    public void updateKeyStore(String keyStorePath, String keyStoreType, String keyStorePassword) {
        updateKeyStore(sslContextFactory -> {
            sslContextFactory.setKeyStorePath(keyStorePath);
            sslContextFactory.setKeyStoreType(keyStoreType);
            sslContextFactory.setKeyStorePassword(keyStorePassword);
        });
    }

    /** Applies the given mutation atomically through the factory's reload, wrapping any failure. */
    private void updateKeyStore(Consumer<SslContextFactory> reloader) {
        try {
            sslContextFactory.reload(reloader);
        } catch (Exception e) {
            throw new RuntimeException("Could not update keystore: " + e.getMessage(), e);
        }
    }
}
/**
 * Default {@link SslKeyStoreContext} implementation that applies keystore updates
 * to a Jetty {@link SslContextFactory} via its reload mechanism.
 */
class DefaultSslKeyStoreContext implements SslKeyStoreContext {

    private final SslContextFactory sslContextFactory;

    public DefaultSslKeyStoreContext(SslContextFactory sslContextFactory) {
        this.sslContextFactory = sslContextFactory;
    }

    @Override
    public void updateKeyStore(KeyStore keyStore) {
        // No password: delegate with null, which the two-arg overload treats as "not protected".
        updateKeyStore(keyStore, null);
    }

    // Fix: restored the two-arg overload the one-arg delegation targets, and removed the
    // duplicated @Override annotation (repeating a non-@Repeatable annotation is a compile error).
    @Override
    public void updateKeyStore(KeyStore keyStore, String password) {
        updateKeyStore(sslContextFactory -> {
            sslContextFactory.setKeyStore(keyStore);
            if (password != null) {
                sslContextFactory.setKeyStorePassword(password);
            }
        });
    }

    @Override
    public void updateKeyStore(String keyStorePath, String keyStoreType, String keyStorePassword) {
        updateKeyStore(sslContextFactory -> {
            sslContextFactory.setKeyStorePath(keyStorePath);
            sslContextFactory.setKeyStoreType(keyStoreType);
            sslContextFactory.setKeyStorePassword(keyStorePassword);
        });
    }

    /** Applies the given mutation atomically through the factory's reload, wrapping any failure. */
    private void updateKeyStore(Consumer<SslContextFactory> reloader) {
        try {
            sslContextFactory.reload(reloader);
        } catch (Exception e) {
            throw new RuntimeException("Could not update keystore: " + e.getMessage(), e);
        }
    }
}
What use are the braces here?
/**
 * Validates ssl configuration for the JKS keystore type: PEM keystore attributes
 * must be absent and a keystore password must be configured.
 *
 * @throws IllegalArgumentException if the configuration is invalid for JKS
 */
private static void validateJksConfig(ConnectorConfig.Ssl ssl) {
    boolean pemAttributesSet =
            ! ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty();
    if (pemAttributesSet)
        throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS.");
    if (ssl.keyDbKey().isEmpty())
        throw new IllegalArgumentException("Missing password for JKS keystore");
}
}
/**
 * Validates ssl configuration for the JKS keystore type: PEM keystore attributes
 * must be absent and a keystore password must be configured.
 *
 * @throws IllegalArgumentException if the configuration is invalid for JKS
 */
private static void validateJksConfig(ConnectorConfig.Ssl ssl) {
    ConnectorConfig.Ssl.PemKeyStore pemKeyStore = ssl.pemKeyStore();
    if ( ! pemKeyStore.keyPath().isEmpty() || ! pemKeyStore.certificatePath().isEmpty())
        throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS.");
    if (ssl.keyDbKey().isEmpty())
        throw new IllegalArgumentException("Missing password for JKS keystore");
}
/**
 * Default {@link SslKeyStoreConfigurator}: validates the connector's ssl configuration at
 * construction time and installs the configured keystore (JKS from disk, or a PEM pair
 * converted to an in-memory Java keystore) into the given context.
 */
class DefaultSslKeyStoreConfigurator implements SslKeyStoreConfigurator {

    private static final Logger log = Logger.getLogger(DefaultSslKeyStoreConfigurator.class.getName());

    private final SecretStore secretStore;
    private final ConnectorConfig.Ssl config;

    /**
     * @param config      the connector configuration; its ssl section is validated eagerly
     * @param secretStore provides the JKS keystore password, looked up by keyDbKey
     * @throws IllegalArgumentException if the ssl configuration is invalid
     */
    @Inject
    public DefaultSslKeyStoreConfigurator(ConnectorConfig config, SecretStore secretStore) {
        validateConfig(config.ssl());
        this.secretStore = secretStore;
        this.config = config.ssl();
    }

    /** Fails fast on invalid ssl configuration; a no-op when ssl is disabled. */
    private static void validateConfig(ConnectorConfig.Ssl config) {
        if (!config.enabled()) return;
        switch (config.keyStoreType()) {
            case JKS:
                // NOTE(review): validateJksConfig is not visible in this excerpt —
                // presumably defined elsewhere in this class; verify.
                validateJksConfig(config);
                break;
            case PEM:
                validatePemConfig(config);
                break;
        }
    }

    /** Installs the configured keystore into the context; a no-op when ssl is disabled. */
    @Override
    public void configure(SslKeyStoreContext context) {
        if (!config.enabled()) return;
        switch (config.keyStoreType()) {
            case JKS:
                // JKS password is fetched from the secret store at configure time.
                context.updateKeyStore(config.keyStorePath(), "JKS", secretStore.getSecret(config.keyDbKey()));
                break;
            case PEM:
                context.updateKeyStore(createPemKeyStore(config.pemKeyStore()));
                break;
        }
    }

    /** Validates PEM configuration: certificate and key paths required, JKS attributes forbidden. */
    private static void validatePemConfig(ConnectorConfig.Ssl ssl) {
        if (! ssl.keyStorePath().isEmpty()) {
            throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM");
        }
        if (!ssl.keyDbKey().isEmpty()) {
            // A password on a PEM keystore cannot be honoured; warn rather than fail.
            log.warning("Encrypted PEM key stores are not supported. Password is only applied to truststore");
        }
        if (ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("Missing certificate path.");
        }
        if (ssl.pemKeyStore().keyPath().isEmpty()) {
            throw new IllegalArgumentException("Missing key path.");
        }
    }

    /** Loads the PEM certificate/key pair from disk into an in-memory Java keystore. */
    private static KeyStore createPemKeyStore(ConnectorConfig.Ssl.PemKeyStore pemKeyStore) {
        try {
            Path certificatePath = Paths.get(pemKeyStore.certificatePath());
            Path keyPath = Paths.get(pemKeyStore.keyPath());
            return new PemSslKeyStore(certificatePath, keyPath).loadJavaKeyStore();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        } catch (Exception e) {
            throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e);
        }
    }
}
/**
 * Default {@link SslKeyStoreConfigurator}: validates the connector's ssl configuration at
 * construction time and installs the configured keystore (JKS from disk, or a PEM pair
 * converted to an in-memory Java keystore) into the given context.
 */
class DefaultSslKeyStoreConfigurator implements SslKeyStoreConfigurator {

    private static final Logger log = Logger.getLogger(DefaultSslKeyStoreConfigurator.class.getName());

    private final SecretStore secretStore;
    private final ConnectorConfig.Ssl config;

    /**
     * @param config      the connector configuration; its ssl section is validated eagerly
     * @param secretStore provides the JKS keystore password, looked up by keyDbKey
     * @throws IllegalArgumentException if the ssl configuration is invalid
     */
    @Inject
    public DefaultSslKeyStoreConfigurator(ConnectorConfig config, SecretStore secretStore) {
        validateConfig(config.ssl());
        this.secretStore = secretStore;
        this.config = config.ssl();
    }

    /** Fails fast on invalid ssl configuration; a no-op when ssl is disabled. */
    private static void validateConfig(ConnectorConfig.Ssl config) {
        if (!config.enabled()) return;
        switch (config.keyStoreType()) {
            case JKS:
                // NOTE(review): validateJksConfig is not visible in this excerpt —
                // presumably defined elsewhere in this class; verify.
                validateJksConfig(config);
                break;
            case PEM:
                validatePemConfig(config);
                break;
        }
    }

    /** Installs the configured keystore into the context; a no-op when ssl is disabled. */
    @Override
    public void configure(SslKeyStoreContext context) {
        if (!config.enabled()) return;
        switch (config.keyStoreType()) {
            case JKS:
                // JKS password is fetched from the secret store at configure time.
                context.updateKeyStore(config.keyStorePath(), "JKS", secretStore.getSecret(config.keyDbKey()));
                break;
            case PEM:
                context.updateKeyStore(createPemKeyStore(config.pemKeyStore()));
                break;
        }
    }

    /** Validates PEM configuration: certificate and key paths required, JKS attributes forbidden. */
    private static void validatePemConfig(ConnectorConfig.Ssl ssl) {
        if (! ssl.keyStorePath().isEmpty()) {
            throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM");
        }
        if (!ssl.keyDbKey().isEmpty()) {
            // A password on a PEM keystore cannot be honoured; warn rather than fail.
            log.warning("Encrypted PEM key stores are not supported. Password is only applied to truststore");
        }
        if (ssl.pemKeyStore().certificatePath().isEmpty()) {
            throw new IllegalArgumentException("Missing certificate path.");
        }
        if (ssl.pemKeyStore().keyPath().isEmpty()) {
            throw new IllegalArgumentException("Missing key path.");
        }
    }

    /** Loads the PEM certificate/key pair from disk into an in-memory Java keystore. */
    private static KeyStore createPemKeyStore(ConnectorConfig.Ssl.PemKeyStore pemKeyStore) {
        try {
            Path certificatePath = Paths.get(pemKeyStore.certificatePath());
            Path keyPath = Paths.get(pemKeyStore.keyPath());
            return new PemSslKeyStore(certificatePath, keyPath).loadJavaKeyStore();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        } catch (Exception e) {
            throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e);
        }
    }
}
Please extract a utility method for creating the ConnectorFactory, as it's repeated in all the tests. Please consider if something is wrong with the design. It's an anti-pattern to send the same params to the ConnectorFactory and one of its parameter objects.
/** JKS keystore type combined with PEM keystore attributes must be rejected at construction. */
public void ssl_jks_config_is_validated() {
    Ssl.PemKeyStore.Builder pemKeyStore = new Ssl.PemKeyStore.Builder().keyPath("nonEmpty");
    Ssl.Builder ssl = new Ssl.Builder()
            .enabled(true)
            .keyStoreType(JKS)
            .pemKeyStore(pemKeyStore);
    ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder().ssl(ssl));
    ThrowingSecretStore secretStore = new ThrowingSecretStore();
    // Construction is expected to throw IllegalArgumentException due to the invalid combination.
    ConnectorFactory willThrowException =
            new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore));
}
ConnectorFactory willThrowException = new ConnectorFactory(config,
/** JKS keystore type combined with PEM keystore attributes must be rejected at construction. */
public void ssl_jks_config_is_validated() {
    Ssl.PemKeyStore.Builder pemKeyStore = new Ssl.PemKeyStore.Builder().keyPath("nonEmpty");
    Ssl.Builder ssl = new Ssl.Builder()
            .enabled(true)
            .keyStoreType(JKS)
            .pemKeyStore(pemKeyStore);
    ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder().ssl(ssl));
    // Factory construction validates the ssl config and is expected to throw.
    ConnectorFactory willThrowException = createConnectorFactory(config);
}
class ConnectorFactoryTest { @Test(expectedExceptions = IllegalArgumentException.class) @Test(expectedExceptions = IllegalArgumentException.class) public void ssl_pem_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(PEM) .keyStorePath("nonEmpty"))); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory willThrowException = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); } @Test public void requireThatNoPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory factory = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); JDiscServerConnector connector = (JDiscServerConnector)factory.createConnector(new DummyMetric(), server, null); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } @Test public void requireThatPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ServerSocketChannel serverChannel = ServerSocketChannel.open(); serverChannel.socket().bind(new InetSocketAddress(0)); ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory factory = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); JDiscServerConnector connector = (JDiscServerConnector) 
factory.createConnector(new DummyMetric(), server, serverChannel); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } private static class HelloWorldHandler extends AbstractHandler { @Override public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { response.getWriter().write("Hello world"); response.getWriter().flush(); response.getWriter().close(); baseRequest.setHandled(true); } } private static class DummyMetric implements Metric { @Override public void set(String key, Number val, Context ctx) { } @Override public void add(String key, Number val, Context ctx) { } @Override public Context createContext(Map<String, ?> properties) { return new DummyContext(); } } private static class DummyContext implements Metric.Context { } private static final class ThrowingSecretStore implements SecretStore { @Override public String getSecret(String key) { throw new UnsupportedOperationException("A secret store is not available"); } } }
class ConnectorFactoryTest { @Test(expectedExceptions = IllegalArgumentException.class) @Test(expectedExceptions = IllegalArgumentException.class) public void ssl_pem_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(PEM) .keyStorePath("nonEmpty"))); ConnectorFactory willThrowException = createConnectorFactory(config); } @Test public void requireThatNoPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ConnectorFactory factory = createConnectorFactory(config); JDiscServerConnector connector = (JDiscServerConnector)factory.createConnector(new DummyMetric(), server, null); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } @Test public void requireThatPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ServerSocketChannel serverChannel = ServerSocketChannel.open(); serverChannel.socket().bind(new InetSocketAddress(0)); ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ConnectorFactory factory = createConnectorFactory(config); JDiscServerConnector connector = (JDiscServerConnector) factory.createConnector(new DummyMetric(), server, serverChannel); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); 
val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } private static ConnectorFactory createConnectorFactory(ConnectorConfig config) { ThrowingSecretStore secretStore = new ThrowingSecretStore(); return new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); } private static class HelloWorldHandler extends AbstractHandler { @Override public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { response.getWriter().write("Hello world"); response.getWriter().flush(); response.getWriter().close(); baseRequest.setHandled(true); } } private static class DummyMetric implements Metric { @Override public void set(String key, Number val, Context ctx) { } @Override public void add(String key, Number val, Context ctx) { } @Override public Context createContext(Map<String, ?> properties) { return new DummyContext(); } } private static class DummyContext implements Metric.Context { } private static final class ThrowingSecretStore implements SecretStore { @Override public String getSecret(String key) { throw new UnsupportedOperationException("A secret store is not available"); } } }
Not sure if nextSuccess.revision() can be empty, but it doesn't hurt to be sure.
/**
 * Returns true if the previous job has completed successfully with a revision and/or version
 * which is newer (different) than the one last completed successfully in next.
 *
 * @param previous the job whose success may unblock the next step (must not be null)
 * @param next     the candidate follow-up job, or null if previous is the last step
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    // No change in flight: nothing can be available to propagate.
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();
    // previous must either be successful, or (for upgrades) have succeeded earlier on the target version.
    if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false;
    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // previous' last success must be on the version being deployed.
        // NOTE(review): assumes lastSuccess() is present whenever the guard above passes — TODO confirm.
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never trigger a zone already running something newer than the target (no downgrades).
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }
    // No follow-up job: previous' success is all that matters.
    if (next == null) return true;
    // next never succeeded: anything previous produced is news to it.
    if ( ! next.lastSuccess().isPresent()) return true;
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    // Comparing the Optionals directly tolerates an absent revision on either side.
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
    return false;
}
if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision()))
/**
 * Returns true if the previous job has completed successfully with a revision and/or version
 * which is newer (different) than the one last completed successfully in next.
 *
 * @param previous the job whose success may unblock the next step (must not be null)
 * @param next     the candidate follow-up job, or null if previous is the last step
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    // No change in flight: nothing can be available to propagate.
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();
    // previous must have a recorded success for anything to flow downstream.
    if ( ! previous.lastSuccess().isPresent()) return false;
    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // previous' last success must be on the version being deployed.
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never trigger a zone already running something newer than the target (no downgrades).
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }
    // No follow-up job: previous' success is all that matters.
    if (next == null) return true;
    // next never succeeded: anything previous produced is news to it.
    if ( ! next.lastSuccess().isPresent()) return true;
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    // Comparing the Optionals directly tolerates an absent revision on either side.
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
    return false;
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Because that is the coding style I prefer :) I don't think we should mandate no braces on _if_ statement unless both body and expression fits single line.
private static void validateJksConfig(ConnectorConfig.Ssl ssl) { if (!ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty()) { throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS."); } if (ssl.keyDbKey().isEmpty()) { throw new IllegalArgumentException("Missing password for JKS keystore"); } }
}
private static void validateJksConfig(ConnectorConfig.Ssl ssl) { if (!ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty()) { throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS."); } if (ssl.keyDbKey().isEmpty()) { throw new IllegalArgumentException("Missing password for JKS keystore"); } }
class DefaultSslKeyStoreConfigurator implements SslKeyStoreConfigurator { private static final Logger log = Logger.getLogger(DefaultSslKeyStoreConfigurator.class.getName()); private final SecretStore secretStore; private final ConnectorConfig.Ssl config; @Inject public DefaultSslKeyStoreConfigurator(ConnectorConfig config, SecretStore secretStore) { validateConfig(config.ssl()); this.secretStore = secretStore; this.config = config.ssl(); } private static void validateConfig(ConnectorConfig.Ssl config) { if (!config.enabled()) return; switch (config.keyStoreType()) { case JKS: validateJksConfig(config); break; case PEM: validatePemConfig(config); break; } } @Override public void configure(SslKeyStoreContext context) { if (!config.enabled()) return; switch (config.keyStoreType()) { case JKS: context.updateKeyStore(config.keyStorePath(), "JKS", secretStore.getSecret(config.keyDbKey())); break; case PEM: context.updateKeyStore(createPemKeyStore(config.pemKeyStore())); break; } } private static void validatePemConfig(ConnectorConfig.Ssl ssl) { if (! ssl.keyStorePath().isEmpty()) { throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM"); } if (!ssl.keyDbKey().isEmpty()) { log.warning("Encrypted PEM key stores are not supported. 
Password is only applied to truststore"); } if (ssl.pemKeyStore().certificatePath().isEmpty()) { throw new IllegalArgumentException("Missing certificate path."); } if (ssl.pemKeyStore().keyPath().isEmpty()) { throw new IllegalArgumentException("Missing key path."); } } private static KeyStore createPemKeyStore(ConnectorConfig.Ssl.PemKeyStore pemKeyStore) { try { Path certificatePath = Paths.get(pemKeyStore.certificatePath()); Path keyPath = Paths.get(pemKeyStore.keyPath()); return new PemSslKeyStore(certificatePath, keyPath).loadJavaKeyStore(); } catch (IOException e) { throw new UncheckedIOException(e); } catch (Exception e) { throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e); } } }
class DefaultSslKeyStoreConfigurator implements SslKeyStoreConfigurator { private static final Logger log = Logger.getLogger(DefaultSslKeyStoreConfigurator.class.getName()); private final SecretStore secretStore; private final ConnectorConfig.Ssl config; @Inject public DefaultSslKeyStoreConfigurator(ConnectorConfig config, SecretStore secretStore) { validateConfig(config.ssl()); this.secretStore = secretStore; this.config = config.ssl(); } private static void validateConfig(ConnectorConfig.Ssl config) { if (!config.enabled()) return; switch (config.keyStoreType()) { case JKS: validateJksConfig(config); break; case PEM: validatePemConfig(config); break; } } @Override public void configure(SslKeyStoreContext context) { if (!config.enabled()) return; switch (config.keyStoreType()) { case JKS: context.updateKeyStore(config.keyStorePath(), "JKS", secretStore.getSecret(config.keyDbKey())); break; case PEM: context.updateKeyStore(createPemKeyStore(config.pemKeyStore())); break; } } private static void validatePemConfig(ConnectorConfig.Ssl ssl) { if (! ssl.keyStorePath().isEmpty()) { throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM"); } if (!ssl.keyDbKey().isEmpty()) { log.warning("Encrypted PEM key stores are not supported. 
Password is only applied to truststore"); } if (ssl.pemKeyStore().certificatePath().isEmpty()) { throw new IllegalArgumentException("Missing certificate path."); } if (ssl.pemKeyStore().keyPath().isEmpty()) { throw new IllegalArgumentException("Missing key path."); } } private static KeyStore createPemKeyStore(ConnectorConfig.Ssl.PemKeyStore pemKeyStore) { try { Path certificatePath = Paths.get(pemKeyStore.certificatePath()); Path keyPath = Paths.get(pemKeyStore.keyPath()); return new PemSslKeyStore(certificatePath, keyPath).loadJavaKeyStore(); } catch (IOException e) { throw new UncheckedIOException(e); } catch (Exception e) { throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e); } } }
Will fix
public void ssl_jks_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(JKS) .pemKeyStore( new Ssl.PemKeyStore.Builder() .keyPath("nonEmpty")))); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory willThrowException = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); }
ConnectorFactory willThrowException = new ConnectorFactory(config,
public void ssl_jks_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(JKS) .pemKeyStore( new Ssl.PemKeyStore.Builder() .keyPath("nonEmpty")))); ConnectorFactory willThrowException = createConnectorFactory(config); }
class ConnectorFactoryTest { @Test(expectedExceptions = IllegalArgumentException.class) @Test(expectedExceptions = IllegalArgumentException.class) public void ssl_pem_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(PEM) .keyStorePath("nonEmpty"))); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory willThrowException = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); } @Test public void requireThatNoPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory factory = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); JDiscServerConnector connector = (JDiscServerConnector)factory.createConnector(new DummyMetric(), server, null); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } @Test public void requireThatPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ServerSocketChannel serverChannel = ServerSocketChannel.open(); serverChannel.socket().bind(new InetSocketAddress(0)); ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory factory = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); JDiscServerConnector connector = (JDiscServerConnector) 
factory.createConnector(new DummyMetric(), server, serverChannel); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } private static class HelloWorldHandler extends AbstractHandler { @Override public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { response.getWriter().write("Hello world"); response.getWriter().flush(); response.getWriter().close(); baseRequest.setHandled(true); } } private static class DummyMetric implements Metric { @Override public void set(String key, Number val, Context ctx) { } @Override public void add(String key, Number val, Context ctx) { } @Override public Context createContext(Map<String, ?> properties) { return new DummyContext(); } } private static class DummyContext implements Metric.Context { } private static final class ThrowingSecretStore implements SecretStore { @Override public String getSecret(String key) { throw new UnsupportedOperationException("A secret store is not available"); } } }
class ConnectorFactoryTest { @Test(expectedExceptions = IllegalArgumentException.class) @Test(expectedExceptions = IllegalArgumentException.class) public void ssl_pem_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(PEM) .keyStorePath("nonEmpty"))); ConnectorFactory willThrowException = createConnectorFactory(config); } @Test public void requireThatNoPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ConnectorFactory factory = createConnectorFactory(config); JDiscServerConnector connector = (JDiscServerConnector)factory.createConnector(new DummyMetric(), server, null); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } @Test public void requireThatPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ServerSocketChannel serverChannel = ServerSocketChannel.open(); serverChannel.socket().bind(new InetSocketAddress(0)); ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ConnectorFactory factory = createConnectorFactory(config); JDiscServerConnector connector = (JDiscServerConnector) factory.createConnector(new DummyMetric(), server, serverChannel); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); 
val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } private static ConnectorFactory createConnectorFactory(ConnectorConfig config) { ThrowingSecretStore secretStore = new ThrowingSecretStore(); return new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); } private static class HelloWorldHandler extends AbstractHandler { @Override public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { response.getWriter().write("Hello world"); response.getWriter().flush(); response.getWriter().close(); baseRequest.setHandled(true); } } private static class DummyMetric implements Metric { @Override public void set(String key, Number val, Context ctx) { } @Override public void add(String key, Number val, Context ctx) { } @Override public Context createContext(Map<String, ?> properties) { return new DummyContext(); } } private static class DummyContext implements Metric.Context { } private static final class ThrowingSecretStore implements SecretStore { @Override public String getSecret(String key) { throw new UnsupportedOperationException("A secret store is not available"); } } }
We could discuss changing the interface to take the `ConnectorConfig` as parameter to `configure()`. I would say that it is a coincidence that `DefaultSslKeyStoreConfigurator` also takes a `SecretStore`.
public void ssl_jks_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(JKS) .pemKeyStore( new Ssl.PemKeyStore.Builder() .keyPath("nonEmpty")))); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory willThrowException = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); }
ConnectorFactory willThrowException = new ConnectorFactory(config,
public void ssl_jks_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(JKS) .pemKeyStore( new Ssl.PemKeyStore.Builder() .keyPath("nonEmpty")))); ConnectorFactory willThrowException = createConnectorFactory(config); }
class ConnectorFactoryTest { @Test(expectedExceptions = IllegalArgumentException.class) @Test(expectedExceptions = IllegalArgumentException.class) public void ssl_pem_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(PEM) .keyStorePath("nonEmpty"))); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory willThrowException = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); } @Test public void requireThatNoPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory factory = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); JDiscServerConnector connector = (JDiscServerConnector)factory.createConnector(new DummyMetric(), server, null); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } @Test public void requireThatPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ServerSocketChannel serverChannel = ServerSocketChannel.open(); serverChannel.socket().bind(new InetSocketAddress(0)); ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ThrowingSecretStore secretStore = new ThrowingSecretStore(); ConnectorFactory factory = new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); JDiscServerConnector connector = (JDiscServerConnector) 
factory.createConnector(new DummyMetric(), server, serverChannel); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } private static class HelloWorldHandler extends AbstractHandler { @Override public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { response.getWriter().write("Hello world"); response.getWriter().flush(); response.getWriter().close(); baseRequest.setHandled(true); } } private static class DummyMetric implements Metric { @Override public void set(String key, Number val, Context ctx) { } @Override public void add(String key, Number val, Context ctx) { } @Override public Context createContext(Map<String, ?> properties) { return new DummyContext(); } } private static class DummyContext implements Metric.Context { } private static final class ThrowingSecretStore implements SecretStore { @Override public String getSecret(String key) { throw new UnsupportedOperationException("A secret store is not available"); } } }
class ConnectorFactoryTest { @Test(expectedExceptions = IllegalArgumentException.class) @Test(expectedExceptions = IllegalArgumentException.class) public void ssl_pem_config_is_validated() { ConnectorConfig config = new ConnectorConfig( new ConnectorConfig.Builder() .ssl(new Ssl.Builder() .enabled(true) .keyStoreType(PEM) .keyStorePath("nonEmpty"))); ConnectorFactory willThrowException = createConnectorFactory(config); } @Test public void requireThatNoPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ConnectorFactory factory = createConnectorFactory(config); JDiscServerConnector connector = (JDiscServerConnector)factory.createConnector(new DummyMetric(), server, null); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } @Test public void requireThatPreBoundChannelWorks() throws Exception { Server server = new Server(); try { ServerSocketChannel serverChannel = ServerSocketChannel.open(); serverChannel.socket().bind(new InetSocketAddress(0)); ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()); ConnectorFactory factory = createConnectorFactory(config); JDiscServerConnector connector = (JDiscServerConnector) factory.createConnector(new DummyMetric(), server, serverChannel); server.addConnector(connector); server.setHandler(new HelloWorldHandler()); server.start(); SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false); SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb"); SimpleHttpClient.ResponseValidator val = ex.execute(); 
val.expectContent(equalTo("Hello world")); } finally { try { server.stop(); } catch (Exception e) { } } } private static ConnectorFactory createConnectorFactory(ConnectorConfig config) { ThrowingSecretStore secretStore = new ThrowingSecretStore(); return new ConnectorFactory(config, secretStore, new DefaultSslKeyStoreConfigurator(config, secretStore)); } private static class HelloWorldHandler extends AbstractHandler { @Override public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { response.getWriter().write("Hello world"); response.getWriter().flush(); response.getWriter().close(); baseRequest.setHandled(true); } } private static class DummyMetric implements Metric { @Override public void set(String key, Number val, Context ctx) { } @Override public void add(String key, Number val, Context ctx) { } @Override public Context createContext(Map<String, ?> properties) { return new DummyContext(); } } private static class DummyContext implements Metric.Context { } private static final class ThrowingSecretStore implements SecretStore { @Override public String getSecret(String key) { throw new UnsupportedOperationException("A secret store is not available"); } } }
next is null when that job type hasn't been run before.
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
I believe this is what was intended: the guard should require that the previous job has a recorded last success (which the later `lastSuccess().get()` calls depend on), rather than that its most recent run succeeded.
/**
 * Returns whether the previous job holds a successful result which is relevant for the
 * application's current change and which the next job has not yet picked up — i.e., whether
 * triggering next would propagate a newer version and/or revision.
 *
 * @param application the application both jobs belong to; must have a change in progress
 * @param previous the status of the job preceding {@code next} in the deployment order
 * @param next the status of the candidate follow-up job, or null if it has never run
 * @return true if {@code next} should be triggered to pick up {@code previous}' result
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();
    // Fix: require a recorded success to propagate from. The old guard
    // (! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) did not
    // guarantee lastSuccess() is present for the unguarded get() calls below, and it blocked
    // propagating an earlier success of a currently-failing non-production job.
    if ( ! previous.lastSuccess().isPresent()) return false;
    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange) change).version();
        // The recorded success must be on the version we are upgrading to, not a stale one.
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never trigger a job whose zone already runs something newer: no downgrades.
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }
    if (next == null) return true;
    if ( ! next.lastSuccess().isPresent()) return true;
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    // A change is available if previous succeeded on a different revision and/or version than next last did.
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
    return false;
}
if ( ! previous.isSuccess() &&
/**
 * Returns whether the previous job has a recorded success which is relevant for the change
 * currently being deployed and which the next job has not yet consumed — that is, whether
 * triggering next would carry a newer version and/or revision forward.
 *
 * @param application the application both jobs belong to
 * @param previous the status of the job preceding {@code next} in the deployment order
 * @param next the status of the candidate follow-up job, or null if it has never run
 * @return true if {@code next} should be triggered
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    Optional<Change> deploying = application.deploying();
    if ( ! deploying.isPresent()) return false;

    // Nothing to propagate unless the previous job has succeeded at some point.
    Optional<JobStatus.JobRun> previousSuccess = previous.lastSuccess();
    if ( ! previousSuccess.isPresent()) return false;

    Change change = deploying.get();
    if (change instanceof Change.VersionChange) {
        Version target = ((Change.VersionChange) change).version();
        // The success must be on the version being rolled out, and must not downgrade next's zone.
        if ( ! target.equals(previousSuccess.get().version())) return false;
        if (next != null && isOnNewerVersionInProductionThan(target, application, next.type())) return false;
    }

    // A job that has never succeeded (or never run) is always eligible.
    if (next == null || ! next.lastSuccess().isPresent()) return true;

    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    boolean newRevision = previousSuccess.get().revision().isPresent()
                          && ! previousSuccess.get().revision().equals(nextSuccess.revision());
    boolean newVersion = ! previousSuccess.get().version().equals(nextSuccess.version());
    return newRevision || newVersion;
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Actually, no. job.isSuccess tells us if the job is currently successful. This checks if there is a successful outcome available, regardless of whether it is the current state.
/**
 * Returns whether the current change is ready to propagate from the previous job to the next:
 * the previous job must have a qualifying success on the change, and the next job must not
 * already have a success with the same revision and version.
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();

    // The previous job qualifies if it is currently successful, or (for production version upgrades)
    // has succeeded on the target version at some earlier point.
    if ( ! (previous.isSuccess() || productionUpgradeHasSucceededFor(previous, change))) return false;

    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange) change).version();
        // The previous success must be on the version we are deploying.
        if ( ! targetVersion.equals(previous.lastSuccess().get().version())) return false;
        // Never propagate a version downgrade into a production zone.
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }

    // With no next job, or a next job which has never succeeded, the change is trivially available.
    if (next == null) return true;
    if ( ! next.lastSuccess().isPresent()) return true;

    // Otherwise, available only if the previous success carries a revision and/or version
    // which the next job has not yet completed successfully.
    JobStatus.JobRun completedInPrevious = previous.lastSuccess().get();
    JobStatus.JobRun completedInNext = next.lastSuccess().get();
    boolean newRevision = completedInPrevious.revision().isPresent()
                          && ! completedInPrevious.revision().equals(completedInNext.revision());
    return newRevision || ! completedInPrevious.version().equals(completedInNext.version());
}
if ( ! previous.isSuccess() &&
/**
 * Returns whether the current change is ready to propagate from the previous job to the next:
 * the previous job must have succeeded at some point, and the next job must not already have
 * a success with the same revision and version.
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();

    // Nothing can be available from a job which has never succeeded.
    if ( ! previous.lastSuccess().isPresent()) return false;

    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange) change).version();
        // The previous success must be on the version we are deploying.
        if ( ! targetVersion.equals(previous.lastSuccess().get().version())) return false;
        // Never propagate a version downgrade into a production zone.
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }

    // With no next job, or a next job which has never succeeded, the change is trivially available.
    if (next == null) return true;
    if ( ! next.lastSuccess().isPresent()) return true;

    // Otherwise, available only if the previous success carries a revision and/or version
    // which the next job has not yet completed successfully.
    JobStatus.JobRun completedInPrevious = previous.lastSuccess().get();
    JobStatus.JobRun completedInNext = next.lastSuccess().get();
    boolean newRevision = completedInPrevious.revision().isPresent()
                          && ! completedInPrevious.revision().equals(completedInNext.revision());
    return newRevision || ! completedInPrevious.version().equals(completedInNext.version());
}
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    // Queue through which triggered jobs are handed to the build service.
    private final BuildSystem buildSystem;
    // Knows the order in which jobs run for a given deployment spec.
    private final DeploymentOrder order;

    /**
     * Creates a deployment trigger.
     *
     * @param controller the controller owning this trigger
     * @param curator backing store for the polled build system queue
     * @param clock the time source; injected for testability
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs are given longer to complete in the main system than in other systems.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Make the revision the current change, unless a version change is already underway.
                        if ( ! ( application.deploying().isPresent() &&
                                 (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Shelve the new revision for later; do not trigger anything for it now.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The last job of the current change completed successfully: clear the change.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in the deployment spec were last successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed since we listed it; skip
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // A version change starts in system test: trigger it unless it was already triggered on this target version.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) &&
             application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange)application.deploying().get()).version();
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (jobStatus == null || ! jobStatus.lastTriggered().isPresent()
                || ! jobStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }

        // For each job in the pipeline, trigger its successors which have this change available to them.
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has never run for this application
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    // NOTE(review): a javadoc reading "Returns true if the previous job has completed successfully with a
    // revision and/or version which is newer (different) than the one last completed successfully in next"
    // sat orphaned here in the original; it describes changesAvailable, which is defined elsewhere.
    /**
     * Called periodically to cause triggering of jobs in the background:
     * retries the first failing job for the current change, and any dead (hung) job.
     */
    public void triggerFailing(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if ( ! application.deploying().isPresent()) return; // no current change to retry jobs for
            // Retry at most the first job which is failing for the current change.
            for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
                JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
                if (isFailing(application.deploying().get(), jobStatus)) {
                    if (shouldRetryNow(jobStatus)) {
                        application = trigger(jobType, application, false, "Retrying failing job");
                        applications().store(application);
                    }
                    break;
                }
            }
            // Retry the oldest job which has been running for longer than the timeout.
            Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs());
            if (firstDeadJob.isPresent()) {
                application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job");
                applications().store(application);
            }
        }
    }

    /** Triggers jobs that have been delayed according to deployment spec */
    public void triggerDelayed() {
        for (Application application : applications().asList()) {
            if ( ! application.deploying().isPresent() ) continue;
            if (application.deploymentJobs().hasFailures()) continue;
            if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue;
            if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) {
                continue; // the spec has no delay steps, so nothing can be waiting on a delay
            }

            // Resume from the job which most recently completed successfully.
            Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values()
                    .stream()
                    .filter(j -> j.lastSuccess().isPresent())
                    .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed())
                    .findFirst();
            if ( ! lastSuccessfulJob.isPresent() ) continue;

            try (Lock lock = applications().lock(application.id())) {
                LockedApplication lockedApplication = applications().require(application.id(), lock);
                lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication),
                                            lockedApplication, "Resuming delayed deployment");
                applications().store(lockedApplication);
            }
        }
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @param change the change to run jobs for
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version()
                                                                          : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing change of the given application, and removes its queued jobs
     *
     * @param applicationId the application whose change to cancel
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether a job is failing for the current change in the given application */
    private boolean isFailing(Change change, JobStatus status) {
        return status != null &&
               ! status.isSuccess() &&
               status.lastCompleted().isPresent() &&
               status.lastCompleted().get().lastCompletedWas(change);
    }

    /** Returns whether this job type runs in zones where capacity may run out (test zones) */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Returns the first job that has been running for more than the given timeout */
    private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) {
        Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream()
                .filter(job -> job.isRunning(Instant.ofEpochMilli(0)))
                .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
                .findFirst();
        return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit()));
    }

    /** Decide whether the job should be triggered by the periodic trigger */
    private boolean shouldRetryNow(JobStatus job) {
        if (job.isSuccess()) return false;
        if (job.isRunning(jobTimeoutLimit())) return false;

        // Retry when a tenth of the time since the job first started failing has passed since the
        // last completion — i.e. progressive backoff — but never wait more than 4 hours between retries.
        Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;
        return false;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Only keep retrying for the first 15 minutes after the job started failing.
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Skip production jobs for zones the deployment spec does not include.
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs, unless they include a production job and one is already running */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        // Only applications with an associated project id can have jobs triggered.
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            // Never trigger a job which would downgrade a production zone.
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    /** Returns whether any production job is currently running (and not timed out) for the application */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier
     * succeeded on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange) ) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in this system
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job type runs in a production zone of this system */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in this system
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly completed application revision may become the current change right away */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        // Already deploying a revision: piling the new one on is accepted.
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true;
        // Failing jobs: presumably accepted so a revision can fix the failure — TODO confirm.
        if ( application.deploymentJobs().hasFailures()) return true;
        // Deployment currently blocked: presumably accepted since nothing will roll out yet — TODO confirm.
        if ( application.isBlocked(clock.instant())) return true;
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    // Queue through which triggered jobs are handed to the build service.
    private final BuildSystem buildSystem;
    // Knows the order in which jobs run for a given deployment spec.
    private final DeploymentOrder order;

    /**
     * Creates a deployment trigger.
     *
     * @param controller the controller owning this trigger
     * @param curator backing store for the polled build system queue
     * @param clock the time source; injected for testability
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs are given longer to complete in the main system than in other systems.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Make the revision the current change, unless a version change is already underway.
                        if ( ! ( application.deploying().isPresent() &&
                                 (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Shelve the new revision for later; do not trigger anything for it now.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The last job of the current change completed successfully: clear the change.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in the deployment spec were last successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed since we listed it; skip
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // A version change starts in system test: trigger it unless it was already triggered on this target version.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) &&
             application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange)application.deploying().get()).version();
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (jobStatus == null || ! jobStatus.lastTriggered().isPresent()
                || ! jobStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }

        // For each job in the pipeline, trigger its successors which have this change available to them.
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has never run for this application
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    // NOTE(review): a javadoc reading "Returns true if the previous job has completed successfully with a
    // revision and/or version which is newer (different) than the one last completed successfully in next"
    // sat orphaned here in the original; it describes changesAvailable, which is defined elsewhere.
    /**
     * Called periodically to cause triggering of jobs in the background:
     * retries the first failing job for the current change, and any dead (hung) job.
     */
    public void triggerFailing(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if ( ! application.deploying().isPresent()) return; // no current change to retry jobs for
            // Retry at most the first job which is failing for the current change.
            for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
                JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
                if (isFailing(application.deploying().get(), jobStatus)) {
                    if (shouldRetryNow(jobStatus)) {
                        application = trigger(jobType, application, false, "Retrying failing job");
                        applications().store(application);
                    }
                    break;
                }
            }
            // Retry the oldest job which has been running for longer than the timeout.
            Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs());
            if (firstDeadJob.isPresent()) {
                application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job");
                applications().store(application);
            }
        }
    }

    /** Triggers jobs that have been delayed according to deployment spec */
    public void triggerDelayed() {
        for (Application application : applications().asList()) {
            if ( ! application.deploying().isPresent() ) continue;
            if (application.deploymentJobs().hasFailures()) continue;
            if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue;
            if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) {
                continue; // the spec has no delay steps, so nothing can be waiting on a delay
            }

            // Resume from the job which most recently completed successfully.
            Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values()
                    .stream()
                    .filter(j -> j.lastSuccess().isPresent())
                    .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed())
                    .findFirst();
            if ( ! lastSuccessfulJob.isPresent() ) continue;

            try (Lock lock = applications().lock(application.id())) {
                LockedApplication lockedApplication = applications().require(application.id(), lock);
                lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication),
                                            lockedApplication, "Resuming delayed deployment");
                applications().store(lockedApplication);
            }
        }
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @param change the change to run jobs for
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version()
                                                                          : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing change of the given application, and removes its queued jobs
     *
     * @param applicationId the application whose change to cancel
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether a job is failing for the current change in the given application */
    private boolean isFailing(Change change, JobStatus status) {
        return status != null &&
               ! status.isSuccess() &&
               status.lastCompleted().isPresent() &&
               status.lastCompleted().get().lastCompletedWas(change);
    }

    /** Returns whether this job type runs in zones where capacity may run out (test zones) */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Returns the first job that has been running for more than the given timeout */
    private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) {
        Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream()
                .filter(job -> job.isRunning(Instant.ofEpochMilli(0)))
                .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
                .findFirst();
        return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit()));
    }

    /** Decide whether the job should be triggered by the periodic trigger */
    private boolean shouldRetryNow(JobStatus job) {
        if (job.isSuccess()) return false;
        if (job.isRunning(jobTimeoutLimit())) return false;

        // Retry when a tenth of the time since the job first started failing has passed since the
        // last completion — i.e. progressive backoff — but never wait more than 4 hours between retries.
        Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;
        return false;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Only keep retrying for the first 15 minutes after the job started failing.
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Skip production jobs for zones the deployment spec does not include.
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs, unless they include a production job and one is already running */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        // Only applications with an associated project id can have jobs triggered.
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            // Never trigger a job which would downgrade a production zone.
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    /** Returns whether any production job is currently running (and not timed out) for the application */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in this system
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job type runs in a production zone of this system */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // job does not run in this system
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly completed application revision may become the current change right away */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        // Already deploying a revision: piling the new one on is accepted.
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true;
        // Failing jobs: presumably accepted so a revision can fix the failure — TODO confirm.
        if ( application.deploymentJobs().hasFailures()) return true;
        // Deployment currently blocked: presumably accepted since nothing will roll out yet — TODO confirm.
        if ( application.isBlocked(clock.instant())) return true;
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
The combination ` ! isSuccess() && ! productionUpgradeHasSucceededFor(...)` could then be reduced to just ` ! lastSuccess().isPresent()`, because the success check is also done in the latter.
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Thanks!
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Also, reading the doc comment for the `production...` check leads me to believe this was, indeed, the intention: _If the job is failing, and isn't a production job with a previous success on this version, bail out._
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Where is the code you are talking about now?
/**
 * Returns whether the job {@code previous} has produced a change (revision and/or version) which
 * is not yet reflected in the last success of {@code next}, i.e. whether {@code next} should run.
 *
 * Fix: the old guard ({@code !previous.isSuccess() && !productionUpgradeHasSucceededFor(...)})
 * did not guarantee {@code previous.lastSuccess()} is present, but the body dereferences it with
 * an unchecked {@code get()} twice. We now require a recorded last success up front, which is
 * both the value actually needed below and a strictly safe precondition.
 *
 * @param application the application owning both job statuses; must have a change in progress
 * @param previous the status of the job that just became a candidate predecessor
 * @param next the status of the follow-up job, or null if it has never run
 * @return true if next should be triggered based on previous' last success
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false; // nothing in progress: nothing to propagate
    Change change = application.deploying().get();

    // Require a recorded success in the upstream job; everything below reads previous.lastSuccess().get()
    if ( ! previous.lastSuccess().isPresent()) return false;

    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // Only propagate if the upstream success was on the version we are upgrading to
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never risk downgrading a production zone that already runs something newer
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }

    if (next == null) return true;                       // no downstream job yet: trigger it
    if ( ! next.lastSuccess().isPresent()) return true;  // downstream never succeeded: trigger it

    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    // Trigger when the upstream success carries a revision or version the downstream hasn't seen
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;

    return false;
}
if ( ! previous.isSuccess() &&
/**
 * Decides whether the downstream job {@code next} has a change available from {@code previous}:
 * the upstream job must have a recorded success carrying a revision and/or version which the
 * downstream job has not yet completed successfully.
 *
 * @param application the application the two job statuses belong to
 * @param previous status of the candidate upstream job
 * @param next status of the downstream job, or null if it never ran
 * @return true if the downstream job should be triggered
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    // A change must be in progress, and the upstream job must have succeeded at some point.
    if ( ! application.deploying().isPresent()) return false;
    if ( ! previous.lastSuccess().isPresent()) return false;

    Change change = application.deploying().get();
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();

    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange) change).version();
        // The upstream success must be on the target version of this upgrade ...
        if ( ! targetVersion.equals(previousSuccess.version())) return false;
        // ... and we must never push an older version onto a production zone already running newer.
        boolean wouldDowngrade = next != null
                                 && isOnNewerVersionInProductionThan(targetVersion, application, next.type());
        if (wouldDowngrade) return false;
    }

    // Downstream job missing or without any success: always worth triggering.
    if (next == null || ! next.lastSuccess().isPresent()) return true;

    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    boolean newRevision = previousSuccess.revision().isPresent()
                          && ! previousSuccess.revision().equals(nextSuccess.revision());
    boolean newVersion = ! previousSuccess.version().equals(nextSuccess.version());
    return newRevision || newVersion;
}
/**
 * Schedules and executes deployment jobs for applications: reacts to job completions, finds and
 * triggers ready jobs, retries failing/dead jobs, resumes delayed deployments, and starts or
 * cancels changes. All application mutations happen under a per-application lock and are stored
 * back through the controller's application repository.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs get a much longer grace period in the main system than elsewhere before being
        // considered dead -- presumably because main-system jobs legitimately run longer; TODO confirm.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            // On success: either adopt a new revision as the current change, park it as an
            // outstanding change, or clear the change if the deployment is complete.
            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Only set an application change if no version change is already in progress
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Remember the revision for later instead of deploying it now
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                .filter(JobType::isProduction)
                .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // Re-read under the lock; the application may have disappeared since listing
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());
        // Kick off the very first job (system test) for a version change if it has not yet been
        // triggered on the target version
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange)application.deploying().get()).version();
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (jobStatus == null || ! jobStatus.lastTriggered().isPresent()
                || ! jobStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }
        // For each completed job, trigger every successor which has a change available from it
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // never run; nothing to propagate from it
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    // NOTE(review): the first javadoc below is orphaned -- it describes changesAvailable, which
    // is defined outside this class; only the second one applies to triggerFailing.
    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    /**
     * Called periodically to cause triggering of jobs in the background
     */
    public void triggerFailing(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if ( ! application.deploying().isPresent()) return; // no ongoing change: no need to retry anything

            // Retry the first failing job in deployment order, at most one per invocation
            for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
                JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
                if (isFailing(application.deploying().get(), jobStatus)) {
                    if (shouldRetryNow(jobStatus)) {
                        application = trigger(jobType, application, false, "Retrying failing job");
                        applications().store(application);
                    }
                    break;
                }
            }

            // Retry dead job
            Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs());
            if (firstDeadJob.isPresent()) {
                application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job");
                applications().store(application);
            }
        }
    }

    /** Triggers jobs that have been delayed according to deployment spec */
    public void triggerDelayed() {
        for (Application application : applications().asList()) {
            if ( ! application.deploying().isPresent() ) continue;
            if (application.deploymentJobs().hasFailures()) continue;
            if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue;
            if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) {
                continue; // Application does not have a delayed deployment
            }

            // Resume from the most recently successful job
            Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values()
                    .stream()
                    .filter(j -> j.lastSuccess().isPresent())
                    .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed())
                    .findFirst();
            if ( ! lastSuccessfulJob.isPresent() ) continue;

            // Trigger next
            try (Lock lock = applications().lock(application.id())) {
                LockedApplication lockedApplication = applications().require(application.id(), lock);
                lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication),
                                            lockedApplication, "Resuming delayed deployment");
                applications().store(lockedApplication);
            }
        }
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application whose change to cancel
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether a job is failing for the current change in the given application */
    private boolean isFailing(Change change, JobStatus status) {
        return status != null && ! status.isSuccess()
               && status.lastCompleted().isPresent()
               && status.lastCompleted().get().lastCompletedWas(change);
    }

    /** Returns whether the given job type runs in a shared, capacity-limited environment */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Returns the first job that has been running for more than the given timeout */
    private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) {
        // NOTE(review): lastTriggered().get() assumes a running job always has a trigger record -- TODO confirm
        Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream()
                .filter(job -> job.isRunning(Instant.ofEpochMilli(0)))
                .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
                .findFirst();
        return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit()));
    }

    /** Decide whether the job should be triggered by the periodic trigger */
    private boolean shouldRetryNow(JobStatus job) {
        if (job.isSuccess()) return false;
        if (job.isRunning(jobTimeoutLimit())) return false;

        // Retry when the time since the last completion exceeds a tenth of the total failing time,
        // i.e. an increasing backoff; and in any case at least every 4 hours.
        Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;

        return false;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        // NOTE(review): firstFailing().get() assumes a present status implies a recorded failure -- TODO confirm
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Retry the job only within 15 minutes of the first out-of-capacity failure
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Skip triggering of jobs for zones where the application should not be deployed
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        // Never run more than one production job at a time per application
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in turn, subject to the same single-production-job constraint */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application; // previous was last job
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        // Ignore applications that are not associated with a project
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    /** Returns whether any production job for the application is currently running (within the timeout) */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange) ) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job type deploys to a production zone */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // arbitrary
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly arrived revision may be made the current change right away */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // more changes are ok
        if ( application.deploymentJobs().hasFailures()) return true; // allow changes to fix upgrade problems
        if ( application.isBlocked(clock.instant())) return true; // allow testing changes while upgrade blocked (debatable)
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
/**
 * Schedules and executes deployment jobs for applications: reacts to job completions, finds and
 * triggers ready jobs, retries failing/dead jobs, resumes delayed deployments, and starts or
 * cancels changes. All application mutations happen under a per-application lock and are stored
 * back through the controller's application repository.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs get a much longer grace period in the main system than elsewhere before being
        // considered dead -- presumably because main-system jobs legitimately run longer; TODO confirm.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            // On success: either adopt a new revision as the current change, park it as an
            // outstanding change, or clear the change if the deployment is complete.
            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Only set an application change if no version change is already in progress
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Remember the revision for later instead of deploying it now
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                .filter(JobType::isProduction)
                .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // Re-read under the lock; the application may have disappeared since listing
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());
        // Kick off the very first job (system test) for a version change if it has not yet been
        // triggered on the target version
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange)application.deploying().get()).version();
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (jobStatus == null || ! jobStatus.lastTriggered().isPresent()
                || ! jobStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }
        // For each completed job, trigger every successor which has a change available from it
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // never run; nothing to propagate from it
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    // NOTE(review): the first javadoc below is orphaned -- it describes changesAvailable, which
    // is defined outside this class; only the second one applies to triggerFailing.
    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    /**
     * Called periodically to cause triggering of jobs in the background
     */
    public void triggerFailing(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if ( ! application.deploying().isPresent()) return; // no ongoing change: no need to retry anything

            // Retry the first failing job in deployment order, at most one per invocation
            for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
                JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
                if (isFailing(application.deploying().get(), jobStatus)) {
                    if (shouldRetryNow(jobStatus)) {
                        application = trigger(jobType, application, false, "Retrying failing job");
                        applications().store(application);
                    }
                    break;
                }
            }

            // Retry dead job
            Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs());
            if (firstDeadJob.isPresent()) {
                application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job");
                applications().store(application);
            }
        }
    }

    /** Triggers jobs that have been delayed according to deployment spec */
    public void triggerDelayed() {
        for (Application application : applications().asList()) {
            if ( ! application.deploying().isPresent() ) continue;
            if (application.deploymentJobs().hasFailures()) continue;
            if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue;
            if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) {
                continue; // Application does not have a delayed deployment
            }

            // Resume from the most recently successful job
            Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values()
                    .stream()
                    .filter(j -> j.lastSuccess().isPresent())
                    .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed())
                    .findFirst();
            if ( ! lastSuccessfulJob.isPresent() ) continue;

            // Trigger next
            try (Lock lock = applications().lock(application.id())) {
                LockedApplication lockedApplication = applications().require(application.id(), lock);
                lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication),
                                            lockedApplication, "Resuming delayed deployment");
                applications().store(lockedApplication);
            }
        }
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application whose change to cancel
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether a job is failing for the current change in the given application */
    private boolean isFailing(Change change, JobStatus status) {
        return status != null && ! status.isSuccess()
               && status.lastCompleted().isPresent()
               && status.lastCompleted().get().lastCompletedWas(change);
    }

    /** Returns whether the given job type runs in a shared, capacity-limited environment */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Returns the first job that has been running for more than the given timeout */
    private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) {
        // NOTE(review): lastTriggered().get() assumes a running job always has a trigger record -- TODO confirm
        Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream()
                .filter(job -> job.isRunning(Instant.ofEpochMilli(0)))
                .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
                .findFirst();
        return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit()));
    }

    /** Decide whether the job should be triggered by the periodic trigger */
    private boolean shouldRetryNow(JobStatus job) {
        if (job.isSuccess()) return false;
        if (job.isRunning(jobTimeoutLimit())) return false;

        // Retry when the time since the last completion exceeds a tenth of the total failing time,
        // i.e. an increasing backoff; and in any case at least every 4 hours.
        Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;

        return false;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        // NOTE(review): firstFailing().get() assumes a present status implies a recorded failure -- TODO confirm
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Retry the job only within 15 minutes of the first out-of-capacity failure
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            // Skip triggering of jobs for zones where the application should not be deployed
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        // Never run more than one production job at a time per application
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in turn, subject to the same single-production-job constraint */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application; // previous was last job
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! deploysTo(application, jobType)) return false;
        // Ignore applications that are not associated with a project
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    /** Returns whether any production job for the application is currently running (within the timeout) */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job type deploys to a production zone */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // arbitrary
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly arrived revision may be made the current change right away */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // more changes are ok
        if ( application.deploymentJobs().hasFailures()) return true; // allow changes to fix upgrade problems
        if ( application.isBlocked(clock.instant())) return true; // allow testing changes while upgrade blocked (debatable)
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
On the line below.
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Below what?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Lines 187 (marked, above) and 188 have a compound condition: ``` if ( ! previous.lastSuccess().isPresent() && ! productionUpgradeHasSucceededFor(...)) ```
/**
 * Returns whether the job {@code previous} holds a change (a version and/or an application
 * revision) which the downstream job {@code next} has not yet completed successfully, i.e.
 * whether triggering {@code next} would deploy something new.
 *
 * NOTE(review): downstream/upstream semantics assumed from the visible guards — confirm
 * against the deployment-order logic.
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    // Nothing to propagate unless a change is currently being deployed.
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();

    // The upstream job must have succeeded for this change, or (for production upgrades)
    // have succeeded earlier on the exact version we are upgrading to.
    boolean upstreamDone = previous.isSuccess() || productionUpgradeHasSucceededFor(previous, change);
    if ( ! upstreamDone) return false;

    if (change instanceof Change.VersionChange) {
        Version target = ((Change.VersionChange) change).version();
        // The upstream success must be on the version being rolled out ...
        if ( ! target.equals(previous.lastSuccess().get().version())) return false;
        // ... and we never trigger what would be a downgrade of a production zone.
        if (next != null && isOnNewerVersionInProductionThan(target, application, next.type()))
            return false;
    }

    // No downstream job, or no downstream success yet: the change is trivially available.
    if (next == null) return true;
    if ( ! next.lastSuccess().isPresent()) return true;

    // Otherwise it is available only if the upstream success carries a newer revision or version.
    JobStatus.JobRun upstreamSuccess = previous.lastSuccess().get();
    JobStatus.JobRun downstreamSuccess = next.lastSuccess().get();
    boolean newRevision = upstreamSuccess.revision().isPresent()
                          && ! upstreamSuccess.revision().equals(downstreamSuccess.revision());
    boolean newVersion = ! upstreamSuccess.version().equals(downstreamSuccess.version());
    return newRevision || newVersion;
}
if ( ! previous.isSuccess() &&
/**
 * Returns whether the job {@code previous} has completed successfully with a change
 * (a version and/or an application revision) which the downstream job {@code next}
 * has not yet completed successfully.
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    // Only relevant while some change is being rolled out.
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();

    // The upstream job must have a success to propagate.
    if ( ! previous.lastSuccess().isPresent()) return false;
    JobStatus.JobRun upstreamSuccess = previous.lastSuccess().get();

    if (change instanceof Change.VersionChange) {
        Version target = ((Change.VersionChange) change).version();
        // The success must be on the version being rolled out ...
        if ( ! target.equals(upstreamSuccess.version())) return false;
        // ... and we never trigger what would be a downgrade of a production zone.
        if (next != null && isOnNewerVersionInProductionThan(target, application, next.type()))
            return false;
    }

    // No downstream job, or no downstream success yet: the change is trivially available.
    if (next == null || ! next.lastSuccess().isPresent()) return true;

    // Otherwise it is available only if it carries a newer revision or version.
    JobStatus.JobRun downstreamSuccess = next.lastSuccess().get();
    if (upstreamSuccess.revision().isPresent()
        && ! upstreamSuccess.revision().equals(downstreamSuccess.revision())) return true;
    return ! upstreamSuccess.version().equals(downstreamSuccess.version());
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
And you think that condition could be reduced to just the first line, is that it?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
As it stands, yes: * If `last success` is present, the first condition becomes `! true`, and we short-circuit. * If `last success` is not present, we check whether `! productionUpgradeHasSucceededFor`; this is also `true`, since it, too, checks if `last success` is present. This, and the javadoc for `productionUpgradeHasSucceededFor`, as well as general reasoning, lead to my conclusion about the intention here.
/**
 * Returns whether the given next job should be triggered because the previous job (in deployment
 * order) has completed successfully with the change currently being deployed.
 *
 * @param application the application whose jobs are being considered; must have a change deploying
 * @param previous the job immediately before {@code next} in deployment order
 * @param next the candidate job to trigger, or null when {@code previous} is the last job
 * @return true if {@code next} has a change available to run on
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false; // Nothing is deploying: nothing to propagate.
    Change change = application.deploying().get();

    // The previous job must have succeeded at least once before anything can flow to the next job.
    // (This also guards the unconditional previous.lastSuccess().get() dereferences below, which the
    // old compound check — ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(...) — only
    // guaranteed indirectly.)
    if ( ! previous.lastSuccess().isPresent()) return false;

    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // The previous job's success must be on the exact version being deployed.
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never trigger a job whose production zone is already on a newer version: that could downgrade nodes.
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }

    if (next == null) return true; // Previous is the last job in the order; its success completes the chain.
    if ( ! next.lastSuccess().isPresent()) return true; // Next has never succeeded: any change is new to it.

    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    // Trigger when previous succeeded with a revision and/or version that next has not yet succeeded with.
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;

    return false;
}
if ( ! previous.isSuccess() &&
/**
 * Returns whether the given next job should be triggered because the previous job (in deployment
 * order) has completed successfully with the change currently being deployed.
 *
 * @param application the application whose jobs are being considered; must have a change deploying
 * @param previous the job immediately before {@code next} in deployment order
 * @param next the candidate job to trigger, or null when {@code previous} is the last job
 * @return true if {@code next} has a change available to run on
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false; // Nothing is deploying: nothing to propagate.
    Change change = application.deploying().get();
    // Previous must have succeeded at least once; also guards the lastSuccess().get() calls below.
    if ( ! previous.lastSuccess().isPresent()) return false;
    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // Previous' success must be on the exact version being deployed.
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never trigger a job whose production zone is already on a newer version (avoids downgrades).
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }
    if (next == null) return true; // Previous is the last job in the order.
    if ( ! next.lastSuccess().isPresent()) return true; // Next has never succeeded: the change is new to it.
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    // Trigger when previous succeeded with a revision and/or version next has not yet succeeded with.
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
    return false;
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
I think you mean we can drop the *first* condition, not the last one, then?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
No, that would make us always bail out for non-production jobs and non-version-changes. I didn't say we should drop anything, I said as it stands, we could as well omit the last condition, which leads me to believe the current condition is faulty.
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Perhaps there would have been no confusion if my highlight covered the line with the second condition as well...
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
... since it is left untouched.
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.isSuccess() && ! productionUpgradeHasSucceededFor(previous, change)) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
if ( ! previous.isSuccess() &&
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if ( ! previous.lastSuccess().isPresent()) return false; if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false; if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false; } if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true; if ( ! previousSuccess.version().equals(nextSuccess.version())) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Ok, now I get it. Yes, I think we can remove the second condition as things stand now.
/**
 * Returns whether the job {@code next} has changes available to run on: i.e., whether
 * {@code previous} last succeeded with a revision and/or version different from the
 * last success of {@code next}.
 *
 * @param application the application the jobs belong to; must have a change deploying for this to return true
 * @param previous the status of the job immediately ahead of next in the deployment order (never null here)
 * @param next the status of the candidate follow-up job, or null if that job has no recorded status
 * @return true if next should be triggered with the output of previous
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false; // no ongoing change to propagate
    Change change = application.deploying().get();
    // The previous job must have succeeded at least once before anything can flow downstream.
    // This replaces the former "isSuccess() || productionUpgradeHasSucceededFor(...)" combination:
    // whether that success was on the right version is checked explicitly below.
    if ( ! previous.lastSuccess().isPresent()) return false;
    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // The previous job's success must be on the version currently deploying ...
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // ... and we must never risk downgrading a production zone already on a newer version
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }
    if (next == null) return true; // no next status recorded: anything previous produced is news
    if ( ! next.lastSuccess().isPresent()) return true; // next has never succeeded: run it
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    // New application revision, or new platform version, not yet seen by next
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
    return false;
}
if ( ! previous.isSuccess() &&
/**
 * Returns whether {@code next} has changes available to run on, i.e. whether the last
 * success of {@code previous} carries a revision and/or version which differs from the
 * last success of {@code next}.
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false; // nothing is deploying

    // Downstream jobs only run on something the previous job has succeeded with
    if ( ! previous.lastSuccess().isPresent()) return false;
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();

    Change change = application.deploying().get();
    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange) change).version();
        // Require success on the exact version being deployed, and refuse to
        // propagate a version older than what next's production zone already runs
        if ( ! targetVersion.equals(previousSuccess.version())) return false;
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }

    // With no (successful) history in next, whatever previous produced is new to it
    if (next == null) return true;
    if ( ! next.lastSuccess().isPresent()) return true;

    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    boolean newRevision = previousSuccess.revision().isPresent()
                          && ! previousSuccess.revision().equals(nextSuccess.revision());
    boolean newVersion = ! previousSuccess.version().equals(nextSuccess.version());
    return newRevision || newVersion;
}
/**
 * Schedules deployment jobs in the build system based on the change currently deploying
 * for each application. All reads and writes of an application happen while holding that
 * application's lock (see {@code applications().lock}).
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a deployment trigger backed by a polled build system.
     *
     * @param controller the controller owning the applications to trigger jobs for
     * @param curator curator db used by the polled build system
     * @param clock the clock used for all timeout and retry decisions (injected for testability)
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // 12 hours in the main system, 1 hour elsewhere
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (possibly including the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            // Decide what (if anything) is now deploying as a result of this completion
            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Set the revision as the change to deploy, unless a version change is already deploying
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else { // postpone the new revision; record it as outstanding
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // the current change has rolled out everywhere
                    application = application.withDeploying(Optional.empty());
                }
            }

            // Trigger the next step(s), or retry this one
            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed since listing
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // Kick off the very first job (system test) for a version change, if not already triggered on the target
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) &&
             application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange)application.deploying().get()).version();
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (jobStatus == null || ! jobStatus.lastTriggered().isPresent()
                                  || ! jobStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }

        // For each job with history, trigger the successors which have changes available
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has never run
            if (jobStatus.isRunning(jobTimeoutLimit())) continue; // wait for it to finish

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Retries the first failing job, and the first dead (hanging) job, of the given application,
     * if it has an ongoing change. Called periodically.
     *
     * @param applicationId the application to retry jobs for
     */
    public void triggerFailing(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if ( ! application.deploying().isPresent()) return; // no change in progress: nothing to retry

            // Retry the first job failing on the current change
            for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
                JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
                if (isFailing(application.deploying().get(), jobStatus)) {
                    if (shouldRetryNow(jobStatus)) {
                        application = trigger(jobType, application, false, "Retrying failing job");
                        applications().store(application);
                    }
                    break; // only the first failing job is considered
                }
            }

            // Retry dead job
            Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs());
            if (firstDeadJob.isPresent()) {
                application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job");
                applications().store(application);
            }
        }
    }

    /** Triggers jobs that have been delayed according to deployment spec */
    public void triggerDelayed() {
        for (Application application : applications().asList()) {
            if ( ! application.deploying().isPresent() ) continue;
            if (application.deploymentJobs().hasFailures()) continue;
            // NOTE(review): reaches jobTimeoutLimit() via the controller; could likely call it directly — confirm
            if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue;
            if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) {
                continue; // application has no delayed deployments
            }

            // Most recently successful job, by time of its last success
            Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values()
                    .stream()
                    .filter(j -> j.lastSuccess().isPresent())
                    .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed())
                    .findFirst();
            if ( ! lastSuccessfulJob.isPresent() ) continue;

            // Trigger the steps following the last success
            try (Lock lock = applications().lock(application.id())) {
                LockedApplication lockedApplication = applications().require(application.id(), lock);
                lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication),
                                            lockedApplication, "Resuming delayed deployment");
                applications().store(lockedApplication);
            }
        }
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @param change the change to deploy
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to cancel the current change of
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    /** Convenience accessor for the application repository */
    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether a job is failing for the current change in the given application */
    private boolean isFailing(Change change, JobStatus status) {
        return status != null &&
               ! status.isSuccess() &&
               status.lastCompleted().isPresent() &&
               status.lastCompleted().get().lastCompletedWas(change);
    }

    /** Returns whether this job type is one for which out-of-capacity retries apply (system and staging test) */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Returns the first job that has been running for more than the job timeout */
    private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) {
        // Epoch as the limit: presumably counts every triggered-but-uncompleted job as running — confirm isRunning semantics
        Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream()
                .filter(job -> job.isRunning(Instant.ofEpochMilli(0)))
                .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
                .findFirst();
        return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit()));
    }

    /** Decide whether the job should be triggered by the periodic trigger */
    private boolean shouldRetryNow(JobStatus job) {
        if (job.isSuccess()) return false;
        if (job.isRunning(jobTimeoutLimit())) return false;
        // Retry if the last attempt is older than 10% of the time the job has been failing ...
        Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
        // ... and in any case if the last attempt is more than 4 hours old
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;
        return false;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): assumes firstFailing is present whenever a status exists here (only called after a failure report) — confirm
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Keep retrying for 15 minutes after the first out-of-capacity failure
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        // Don't start a production job while another is already running
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers the given jobs in order; skips all of them if any is a production job while one is already running */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; // block window
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; // already running
        if ( ! deploysTo(application, jobType)) return false; // not in deployment spec
        if ( ! application.deploymentJobs().projectId().isPresent()) return false; // no build project registered
        // Never trigger a version which would downgrade a production zone
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    /** Returns whether a production job is currently running (within the job timeout) */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded
     * on the version we are currently upgrading to
     */
    private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) {
        if ( ! (change instanceof Change.VersionChange) ) return false;
        if ( ! isProduction(jobStatus.type())) return false;
        Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess();
        if ( ! lastSuccess.isPresent()) return false;
        return lastSuccess.get().version().equals(((Change.VersionChange)change).version());
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the job targets a production zone */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly completed revision should start deploying right away */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true; // nothing else in progress
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // merge with ongoing revision
        if ( application.deploymentJobs().hasFailures()) return true; // the new revision may fix the failure
        if ( application.isBlocked(clock.instant())) return true; // upgrades are blocked anyway
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
/**
 * Schedules deployment jobs in the build system based on the change currently deploying
 * for each application. All reads and writes of an application happen while holding that
 * application's lock (see {@code applications().lock}).
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a deployment trigger backed by a polled build system.
     *
     * @param controller the controller owning the applications to trigger jobs for
     * @param curator curator db used by the polled build system
     * @param clock the clock used for all timeout and retry decisions (injected for testability)
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // 12 hours in the main system, 1 hour elsewhere
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (possibly including the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            // Decide what (if anything) is now deploying as a result of this completion
            if (report.success()) {
                if (order.givesNewRevision(report.jobType())) {
                    if (acceptNewRevisionNow(application)) {
                        // Set the revision as the change to deploy, unless a version change is already deploying
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else { // postpone the new revision; record it as outstanding
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // the current change has rolled out everywhere
                    application = application.withDeploying(Optional.empty());
                }
            }

            // Trigger the next step(s), or retry this one
            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (shouldRetryNow(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed since listing
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // Kick off the very first job (system test) for a version change, if not already triggered on the target
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) &&
             application.deploying().get() instanceof Change.VersionChange) {
            Version target = ((Change.VersionChange)application.deploying().get()).version();
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (jobStatus == null || ! jobStatus.lastTriggered().isPresent()
                                  || ! jobStatus.lastTriggered().get().version().equals(target)) {
                application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                controller.applications().store(application);
            }
        }

        // For each job with history, trigger the successors which have changes available
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has never run
            if (jobStatus.isRunning(jobTimeoutLimit())) continue; // wait for it to finish

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Retries the first failing job, and the first dead (hanging) job, of the given application,
     * if it has an ongoing change. Called periodically.
     *
     * @param applicationId the application to retry jobs for
     */
    public void triggerFailing(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if ( ! application.deploying().isPresent()) return; // no change in progress: nothing to retry

            // Retry the first job failing on the current change
            for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
                JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
                if (isFailing(application.deploying().get(), jobStatus)) {
                    if (shouldRetryNow(jobStatus)) {
                        application = trigger(jobType, application, false, "Retrying failing job");
                        applications().store(application);
                    }
                    break; // only the first failing job is considered
                }
            }

            // Retry dead job
            Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs());
            if (firstDeadJob.isPresent()) {
                application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job");
                applications().store(application);
            }
        }
    }

    /** Triggers jobs that have been delayed according to deployment spec */
    public void triggerDelayed() {
        for (Application application : applications().asList()) {
            if ( ! application.deploying().isPresent() ) continue;
            if (application.deploymentJobs().hasFailures()) continue;
            // NOTE(review): reaches jobTimeoutLimit() via the controller; could likely call it directly — confirm
            if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue;
            if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) {
                continue; // application has no delayed deployments
            }

            // Most recently successful job, by time of its last success
            Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values()
                    .stream()
                    .filter(j -> j.lastSuccess().isPresent())
                    .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed())
                    .findFirst();
            if ( ! lastSuccessfulJob.isPresent() ) continue;

            // Trigger the steps following the last success
            try (Lock lock = applications().lock(application.id())) {
                LockedApplication lockedApplication = applications().require(application.id(), lock);
                lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication),
                                            lockedApplication, "Resuming delayed deployment");
                applications().store(lockedApplication);
            }
        }
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @param change the change to deploy
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to cancel the current change of
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    /** Convenience accessor for the application repository */
    private ApplicationController applications() { return controller.applications(); }

    /** Returns whether a job is failing for the current change in the given application */
    private boolean isFailing(Change change, JobStatus status) {
        return status != null &&
               ! status.isSuccess() &&
               status.lastCompleted().isPresent() &&
               status.lastCompleted().get().lastCompletedWas(change);
    }

    /** Returns whether this job type is one for which out-of-capacity retries apply (system and staging test) */
    private boolean isCapacityConstrained(JobType jobType) {
        return jobType == JobType.stagingTest || jobType == JobType.systemTest;
    }

    /** Returns the first job that has been running for more than the job timeout */
    private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) {
        // Epoch as the limit: presumably counts every triggered-but-uncompleted job as running — confirm isRunning semantics
        Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream()
                .filter(job -> job.isRunning(Instant.ofEpochMilli(0)))
                .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
                .findFirst();
        return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit()));
    }

    /** Decide whether the job should be triggered by the periodic trigger */
    private boolean shouldRetryNow(JobStatus job) {
        if (job.isSuccess()) return false;
        if (job.isRunning(jobTimeoutLimit())) return false;
        // Retry if the last attempt is older than 10% of the time the job has been failing ...
        Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
        // ... and in any case if the last attempt is more than 4 hours old
        if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;
        return false;
    }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean shouldRetryNow(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): assumes firstFailing is present whenever a status exists here (only called after a failure report) — confirm
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) {
        Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                .flatMap(JobStatus::jobError)
                .filter(e -> e.equals(JobError.outOfCapacity));
        if ( ! outOfCapacityError.isPresent()) return false;
        // Keep retrying for 15 minutes after the first out-of-capacity failure
        return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at()
                .isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean deploysTo(Application application, JobType jobType) {
        Optional<Zone> zone = jobType.zone(controller.system());
        if (zone.isPresent() && jobType.isProduction()) {
            if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
                return false;
            }
        }
        return true;
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        // Don't start a production job while another is already running
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers the given jobs in order; skips all of them if any is a production job while one is already running */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; // block window
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; // already running
        if ( ! deploysTo(application, jobType)) return false; // not in deployment spec
        if ( ! application.deploymentJobs().projectId().isPresent()) return false; // no build project registered
        // Never trigger a version which would downgrade a production zone
        if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)application.deploying().get()).version();
            if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false;
        }
        return true;
    }

    /** Returns whether a production job is currently running (within the job timeout) */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the job targets a production zone */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly completed revision should start deploying right away */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true; // nothing else in progress
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // merge with ongoing revision
        if ( application.deploymentJobs().hasFailures()) return true; // the new revision may fix the failure
        if ( application.isBlocked(clock.instant())) return true; // upgrades are blocked anyway
        return false;
    }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

}
Agreed.
/**
 * Returns whether the job given by {@code next} should be triggered because {@code previous}
 * has completed successfully with a revision and/or version which is newer (different) than
 * the one last completed successfully in {@code next}.
 *
 * @param application the application owning the jobs; triggering requires a change in progress
 * @param previous the status of the already completed upstream job
 * @param next the status of the candidate follow-up job, or null if it has never run
 * @return true if the follow-up job should be triggered
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();

    // Require a recorded success to propagate from. This replaces the previous
    // "isSuccess() || productionUpgradeHasSucceededFor(...)" guard (agreed in review),
    // and also directly guards the previous.lastSuccess().get() calls below.
    if ( ! previous.lastSuccess().isPresent()) return false;

    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // Only propagate a success achieved on the version currently being rolled out
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never trigger towards a production zone already on a newer version: that could
        // downgrade production nodes, which we are not guaranteed to support
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }

    if (next == null) return true;                       // end of the chain: previous' success suffices
    if ( ! next.lastSuccess().isPresent()) return true;  // next has never succeeded: always worth triggering

    // Trigger only if previous succeeded with a revision and/or version next has not yet seen
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision()))
        return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version()))
        return true;
    return false;
}
if ( ! previous.isSuccess() &&
/**
 * Returns whether the job given by {@code next} should be triggered because {@code previous}
 * has completed successfully with a revision and/or version which is newer (different) than
 * the one last completed successfully in {@code next}.
 *
 * NOTE: the guard order below is load-bearing: lastSuccess().isPresent() must be checked
 * before any of the lastSuccess().get() calls further down.
 *
 * @param application the application owning the jobs; triggering requires a change in progress
 * @param previous the status of the already completed upstream job
 * @param next the status of the candidate follow-up job, or null if it has never run
 * @return true if the follow-up job should be triggered
 */
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
    // No change in progress: nothing to propagate
    if ( ! application.deploying().isPresent()) return false;
    Change change = application.deploying().get();
    // A recorded success in previous is required; also guards the .get() calls below
    if ( ! previous.lastSuccess().isPresent()) return false;
    if (change instanceof Change.VersionChange) {
        Version targetVersion = ((Change.VersionChange)change).version();
        // Only propagate a success achieved on the version currently being rolled out
        if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
        // Never trigger towards a zone already on a newer version (no production downgrades)
        if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
    }
    // End of the chain: previous' success suffices
    if (next == null) return true;
    // Next has never succeeded: always worth triggering
    if ( ! next.lastSuccess().isPresent()) return true;
    // Trigger only if previous succeeded with a revision and/or version next has not yet seen
    JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
    JobStatus.JobRun nextSuccess = next.lastSuccess().get();
    if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
    if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
    return false;
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * When upgrading it is ok to trigger the next job even if the previous failed if the previous has earlier succeeded * on the version we are currently upgrading to */ private boolean productionUpgradeHasSucceededFor(JobStatus jobStatus, Change change) { if ( ! (change instanceof Change.VersionChange) ) return false; if ( ! 
isProduction(jobStatus.type())) return false; Optional<JobStatus.JobRun> lastSuccess = jobStatus.lastSuccess(); if ( ! lastSuccess.isPresent()) return false; return lastSuccess.get().version().equals(((Change.VersionChange)change).version()); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. */ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { LockedApplication application = applications().require(report.applicationId(), lock); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.givesNewRevision(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (shouldRetryNow(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); } } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; return order.jobsFrom(application.deploymentSpec()).stream() .filter(JobType::isProduction) .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType)); } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock); if ( ! 
lockedApplication.isPresent()) continue; triggerReadyJobs(lockedApplication.get()); } } } /** Find the next step to trigger if any, and triggers it */ private void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) && application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange)application.deploying().get()).version(); JobStatus jobStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (jobStatus == null || ! jobStatus.lastTriggered().isPresent() || ! jobStatus.lastTriggered().get().version().equals(target)) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus)) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job"); applications().store(application); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job"); applications().store(application); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { LockedApplication lockedApplication = applications().require(application.id(), lock); lockedApplication = trigger(order.nextAfter(lastSuccessfulJob.get().type(), lockedApplication), lockedApplication, "Resuming delayed deployment"); applications().store(lockedApplication); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { LockedApplication application = applications().require(applicationId, lock); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && ! status.isSuccess() && status.lastCompleted().isPresent() && status.lastCompleted().get().lastCompletedWas(change); } private boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if 
(job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deployingBlocked(clock.instant())) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! deploysTo(application, jobType)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)application.deploying().get()).version(); if (isOnNewerVersionInProductionThan(targetVersion, application, jobType)) return false; } return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer than the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support. 
*/ private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) { if ( ! isProduction(job)) return false; Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version); } private boolean isProduction(JobType job) { Optional<Zone> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; return zone.get().environment() == Environment.prod; } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Please also check that getInjectedComponentIds().size() == 1
public void ssl_keystore_configurator_can_be_overriden() throws IOException, SAXException { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo'>", " <ssl-keystore-configurator class='com.yahoo.MySslKeyStoreConfigurator' bundle='mybundle'/>", " </server>", " <server port='9001' id='bar'/>", " </http>", nodesXml, "</jdisc>"); createModel(root, clusterElem); ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); List<ConnectorFactory> connectorFactories = cluster.getChildrenByTypeRecursive(ConnectorFactory.class); { ConnectorFactory firstConnector = connectorFactories.get(0); assertThat(firstConnector.getInjectedComponentIds(), hasItem("ssl-keystore-configurator@foo")); SimpleComponent sslKeystoreConfigurator = firstConnector.getChildrenByTypeRecursive(SimpleComponent.class).get(0); BundleInstantiationSpecification spec = sslKeystoreConfigurator.model.bundleInstantiationSpec; assertThat(spec.classId.toString(), is("com.yahoo.MySslKeyStoreConfigurator")); assertThat(spec.bundle.toString(), is("mybundle")); } { ConnectorFactory secondFactory = connectorFactories.get(1); assertThat(secondFactory.getInjectedComponentIds(), hasItem("ssl-keystore-configurator@bar")); SimpleComponent sslKeystoreConfigurator = secondFactory.getChildrenByTypeRecursive(SimpleComponent.class).get(0); BundleInstantiationSpecification spec = sslKeystoreConfigurator.model.bundleInstantiationSpec; assertThat(spec.classId.toString(), is(DefaultSslKeyStoreConfigurator.class.getName())); assertThat(spec.bundle.toString(), is("jdisc_http_service")); } }
assertThat(firstConnector.getInjectedComponentIds(), hasItem("ssl-keystore-configurator@foo"));
public void ssl_keystore_configurator_can_be_overriden() throws IOException, SAXException { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo'>", " <ssl-keystore-configurator class='com.yahoo.MySslKeyStoreConfigurator' bundle='mybundle'/>", " </server>", " <server port='9001' id='bar'/>", " </http>", nodesXml, "</jdisc>"); createModel(root, clusterElem); ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); List<ConnectorFactory> connectorFactories = cluster.getChildrenByTypeRecursive(ConnectorFactory.class); { ConnectorFactory firstConnector = connectorFactories.get(0); assertThat(firstConnector.getInjectedComponentIds(), hasItem("ssl-keystore-configurator@foo")); SimpleComponent sslKeystoreConfigurator = firstConnector.getChildrenByTypeRecursive(SimpleComponent.class).get(0); BundleInstantiationSpecification spec = sslKeystoreConfigurator.model.bundleInstantiationSpec; assertThat(spec.classId.toString(), is("com.yahoo.MySslKeyStoreConfigurator")); assertThat(spec.bundle.toString(), is("mybundle")); } { ConnectorFactory secondFactory = connectorFactories.get(1); assertThat(secondFactory.getInjectedComponentIds(), hasItem("ssl-keystore-configurator@bar")); SimpleComponent sslKeystoreConfigurator = secondFactory.getChildrenByTypeRecursive(SimpleComponent.class).get(0); BundleInstantiationSpecification spec = sslKeystoreConfigurator.model.bundleInstantiationSpec; assertThat(spec.classId.toString(), is(DefaultSslKeyStoreConfigurator.class.getName())); assertThat(spec.bundle.toString(), is("jdisc_http_service")); } }
class JettyContainerModelBuilderTest extends ContainerModelBuilderTestBase { @Test public void verify_that_overriding_connector_options_works() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>\n" + " <http>\n" + " <server id='bananarama' port='4321'>\n" + " <config name='jdisc.http.connector'>\n" + " <requestHeaderSize>300000</requestHeaderSize>\n" + " <headerCacheSize>300000</headerCacheSize>\n" + " </config>\n" + " </server>\n" + " </http>\n" + nodesXml + "</jdisc>\n" ); createModel(root, clusterElem); ConnectorConfig.Builder connectorConfigBuilder = new ConnectorConfig.Builder(); ConnectorConfig cfg = root.getConfig(ConnectorConfig.class, "default/http/jdisc-jetty/bananarama"); assertThat(cfg.requestHeaderSize(), is(300000)); assertThat(cfg.headerCacheSize(), is(300000)); } @Test public void verify_that_enabling_jetty_works() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>" + nodesXml + "</jdisc>" ); createModel(root, clusterElem); assertJettyServerInConfig(); } @Test public void verify_that_enabling_jetty_works_for_custom_http_servers() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo' />", " </http>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); assertJettyServerInConfig(); } @Test public void verifyThatJettyHttpServerHasFilterBindingsProvider() throws Exception { final Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); final ComponentsConfig.Components jettyHttpServerComponent = extractComponentByClassName( containerComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()); assertThat(jettyHttpServerComponent, is(not(nullValue()))); final ComponentsConfig.Components 
filterBindingsProviderComponent = extractComponentByClassName( containerComponentsConfig(), FilterBindingsProvider.class.getName()); assertThat(filterBindingsProviderComponent, is(not(nullValue()))); final ComponentsConfig.Components.Inject filterBindingsProviderInjection = extractInjectionById( jettyHttpServerComponent, filterBindingsProviderComponent.id()); assertThat(filterBindingsProviderInjection, is(not(nullValue()))); } @Test public void verifyThatJettyHttpServerHasFilterBindingsProviderForCustomHttpServers() throws Exception { final Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo' />", " </http>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); final ComponentsConfig.Components jettyHttpServerComponent = extractComponentByClassName( clusterComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()); assertThat(jettyHttpServerComponent, is(not(nullValue()))); final ComponentsConfig.Components filterBindingsProviderComponent = extractComponentByClassName( clusterComponentsConfig(), FilterBindingsProvider.class.getName()); assertThat(filterBindingsProviderComponent, is(not(nullValue()))); final ComponentsConfig.Components.Inject filterBindingsProviderInjection = extractInjectionById( jettyHttpServerComponent, filterBindingsProviderComponent.id()); assertThat(filterBindingsProviderInjection, is(not(nullValue()))); } @Test public void verify_that_old_http_config_override_inside_server_tag_works() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo'>", " <config name=\"container.jdisc.config.http-server\">", " <tcpKeepAliveEnabled>true</tcpKeepAliveEnabled>", " <tcpNoDelayEnabled>false</tcpNoDelayEnabled>", " <tcpListenBacklogLength>2</tcpListenBacklogLength>", " <idleConnectionTimeout>34.1</idleConnectionTimeout>", " 
<soLinger>42.2</soLinger>", " <sendBufferSize>1234</sendBufferSize>", " <maxHeaderSize>4321</maxHeaderSize>", " <ssl>", " <enabled>true</enabled>", " <keyStoreType>JKS</keyStoreType>", " <keyStorePath>apple</keyStorePath>", " <trustStorePath>grape</trustStorePath>", " <keyDBKey>tomato</keyDBKey>", " <algorithm>onion</algorithm>", " <protocol>carrot</protocol>", " </ssl>", " </config>", " </server>", " </http>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); List<JettyHttpServer> jettyServers = cluster.getChildrenByTypeRecursive(JettyHttpServer.class); assertThat(jettyServers.size(), is(1)); JettyHttpServer server = jettyServers.get(0); assertThat(server.model.bundleInstantiationSpec.classId.toString(), is(com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName())); assertThat(server.model.bundleInstantiationSpec.bundle.toString(), is("jdisc_http_service")); assertThat(server.getConnectorFactories().size(), is(1)); ConnectorConfig.Builder connectorConfigBuilder = new ConnectorConfig.Builder(); server.getConnectorFactories().get(0).getConfig(connectorConfigBuilder); ConnectorConfig connector = new ConnectorConfig(connectorConfigBuilder); assertThat(connector.name(), equalTo("foo")); assertThat(connector.tcpKeepAliveEnabled(), equalTo(true)); assertThat(connector.tcpNoDelay(), equalTo(false)); assertThat(connector.acceptQueueSize(), equalTo(2)); assertThat(connector.idleTimeout(), equalTo(34.1)); assertThat(connector.soLingerTime(), equalTo(42.2)); assertThat(connector.outputBufferSize(), equalTo(1234)); assertThat(connector.headerCacheSize(), equalTo(4321)); assertThat(connector.ssl().enabled(), equalTo(true)); assertThat(connector.ssl().keyStoreType(), equalTo(KeyStoreType.Enum.JKS)); assertThat(connector.ssl().keyStorePath(), equalTo("apple")); assertThat(connector.ssl().trustStorePath(), equalTo("grape")); assertThat(connector.ssl().keyDbKey(), equalTo("tomato")); 
assertThat(connector.ssl().sslKeyManagerFactoryAlgorithm(), equalTo("onion")); assertThat(connector.ssl().protocol(), equalTo("carrot")); assertThat( extractComponentByClassName( clusterComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()), is(not(nullValue()))); } @Test private void assertJettyServerInConfig() { ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); List<JettyHttpServer> jettyServers = cluster.getChildrenByTypeRecursive(JettyHttpServer.class); assertThat(jettyServers.size(), is(1)); JettyHttpServer server = jettyServers.get(0); assertThat(server.model.bundleInstantiationSpec.classId.toString(), is(com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName())); assertThat(server.model.bundleInstantiationSpec.bundle.toString(), is("jdisc_http_service")); assertThat(server.getConnectorFactories().size(), is(1)); assertThat( extractComponentByClassName( containerComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()), is(not(nullValue()))); } private static ComponentsConfig.Components extractComponentByClassName( final ComponentsConfig componentsConfig, final String className) { for (final ComponentsConfig.Components component : componentsConfig.components()) { if (className.equals(component.classId())) { return component; } } return null; } private static ComponentsConfig.Components.Inject extractInjectionById( final ComponentsConfig.Components component, final String id) { for (final ComponentsConfig.Components.Inject injection : component.inject()) { if (id.equals(injection.id())) { return injection; } } return null; } private ComponentsConfig containerComponentsConfig() { final ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); return root.getConfig( ComponentsConfig.class, cluster.getContainers().get(0).getConfigId()); } private ComponentsConfig clusterComponentsConfig() { return componentsConfig(); } }
class JettyContainerModelBuilderTest extends ContainerModelBuilderTestBase { @Test public void verify_that_overriding_connector_options_works() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>\n" + " <http>\n" + " <server id='bananarama' port='4321'>\n" + " <config name='jdisc.http.connector'>\n" + " <requestHeaderSize>300000</requestHeaderSize>\n" + " <headerCacheSize>300000</headerCacheSize>\n" + " </config>\n" + " </server>\n" + " </http>\n" + nodesXml + "</jdisc>\n" ); createModel(root, clusterElem); ConnectorConfig.Builder connectorConfigBuilder = new ConnectorConfig.Builder(); ConnectorConfig cfg = root.getConfig(ConnectorConfig.class, "default/http/jdisc-jetty/bananarama"); assertThat(cfg.requestHeaderSize(), is(300000)); assertThat(cfg.headerCacheSize(), is(300000)); } @Test public void verify_that_enabling_jetty_works() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>" + nodesXml + "</jdisc>" ); createModel(root, clusterElem); assertJettyServerInConfig(); } @Test public void verify_that_enabling_jetty_works_for_custom_http_servers() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo' />", " </http>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); assertJettyServerInConfig(); } @Test public void verifyThatJettyHttpServerHasFilterBindingsProvider() throws Exception { final Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); final ComponentsConfig.Components jettyHttpServerComponent = extractComponentByClassName( containerComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()); assertThat(jettyHttpServerComponent, is(not(nullValue()))); final ComponentsConfig.Components 
filterBindingsProviderComponent = extractComponentByClassName( containerComponentsConfig(), FilterBindingsProvider.class.getName()); assertThat(filterBindingsProviderComponent, is(not(nullValue()))); final ComponentsConfig.Components.Inject filterBindingsProviderInjection = extractInjectionById( jettyHttpServerComponent, filterBindingsProviderComponent.id()); assertThat(filterBindingsProviderInjection, is(not(nullValue()))); } @Test public void verifyThatJettyHttpServerHasFilterBindingsProviderForCustomHttpServers() throws Exception { final Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo' />", " </http>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); final ComponentsConfig.Components jettyHttpServerComponent = extractComponentByClassName( clusterComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()); assertThat(jettyHttpServerComponent, is(not(nullValue()))); final ComponentsConfig.Components filterBindingsProviderComponent = extractComponentByClassName( clusterComponentsConfig(), FilterBindingsProvider.class.getName()); assertThat(filterBindingsProviderComponent, is(not(nullValue()))); final ComponentsConfig.Components.Inject filterBindingsProviderInjection = extractInjectionById( jettyHttpServerComponent, filterBindingsProviderComponent.id()); assertThat(filterBindingsProviderInjection, is(not(nullValue()))); } @Test public void verify_that_old_http_config_override_inside_server_tag_works() throws Exception { Element clusterElem = DomBuilderTest.parse( "<jdisc id='default' version='1.0' jetty='true'>", " <http>", " <server port='9000' id='foo'>", " <config name=\"container.jdisc.config.http-server\">", " <tcpKeepAliveEnabled>true</tcpKeepAliveEnabled>", " <tcpNoDelayEnabled>false</tcpNoDelayEnabled>", " <tcpListenBacklogLength>2</tcpListenBacklogLength>", " <idleConnectionTimeout>34.1</idleConnectionTimeout>", " 
<soLinger>42.2</soLinger>", " <sendBufferSize>1234</sendBufferSize>", " <maxHeaderSize>4321</maxHeaderSize>", " <ssl>", " <enabled>true</enabled>", " <keyStoreType>JKS</keyStoreType>", " <keyStorePath>apple</keyStorePath>", " <trustStorePath>grape</trustStorePath>", " <keyDBKey>tomato</keyDBKey>", " <algorithm>onion</algorithm>", " <protocol>carrot</protocol>", " </ssl>", " </config>", " </server>", " </http>", nodesXml, "</jdisc>" ); createModel(root, clusterElem); ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); List<JettyHttpServer> jettyServers = cluster.getChildrenByTypeRecursive(JettyHttpServer.class); assertThat(jettyServers.size(), is(1)); JettyHttpServer server = jettyServers.get(0); assertThat(server.model.bundleInstantiationSpec.classId.toString(), is(com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName())); assertThat(server.model.bundleInstantiationSpec.bundle.toString(), is("jdisc_http_service")); assertThat(server.getConnectorFactories().size(), is(1)); ConnectorConfig.Builder connectorConfigBuilder = new ConnectorConfig.Builder(); server.getConnectorFactories().get(0).getConfig(connectorConfigBuilder); ConnectorConfig connector = new ConnectorConfig(connectorConfigBuilder); assertThat(connector.name(), equalTo("foo")); assertThat(connector.tcpKeepAliveEnabled(), equalTo(true)); assertThat(connector.tcpNoDelay(), equalTo(false)); assertThat(connector.acceptQueueSize(), equalTo(2)); assertThat(connector.idleTimeout(), equalTo(34.1)); assertThat(connector.soLingerTime(), equalTo(42.2)); assertThat(connector.outputBufferSize(), equalTo(1234)); assertThat(connector.headerCacheSize(), equalTo(4321)); assertThat(connector.ssl().enabled(), equalTo(true)); assertThat(connector.ssl().keyStoreType(), equalTo(KeyStoreType.Enum.JKS)); assertThat(connector.ssl().keyStorePath(), equalTo("apple")); assertThat(connector.ssl().trustStorePath(), equalTo("grape")); assertThat(connector.ssl().keyDbKey(), equalTo("tomato")); 
assertThat(connector.ssl().sslKeyManagerFactoryAlgorithm(), equalTo("onion")); assertThat(connector.ssl().protocol(), equalTo("carrot")); assertThat( extractComponentByClassName( clusterComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()), is(not(nullValue()))); } @Test private void assertJettyServerInConfig() { ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); List<JettyHttpServer> jettyServers = cluster.getChildrenByTypeRecursive(JettyHttpServer.class); assertThat(jettyServers.size(), is(1)); JettyHttpServer server = jettyServers.get(0); assertThat(server.model.bundleInstantiationSpec.classId.toString(), is(com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName())); assertThat(server.model.bundleInstantiationSpec.bundle.toString(), is("jdisc_http_service")); assertThat(server.getConnectorFactories().size(), is(1)); assertThat( extractComponentByClassName( containerComponentsConfig(), com.yahoo.jdisc.http.server.jetty.JettyHttpServer.class.getName()), is(not(nullValue()))); } private static ComponentsConfig.Components extractComponentByClassName( final ComponentsConfig componentsConfig, final String className) { for (final ComponentsConfig.Components component : componentsConfig.components()) { if (className.equals(component.classId())) { return component; } } return null; } private static ComponentsConfig.Components.Inject extractInjectionById( final ComponentsConfig.Components component, final String id) { for (final ComponentsConfig.Components.Inject injection : component.inject()) { if (id.equals(injection.id())) { return injection; } } return null; } private ComponentsConfig containerComponentsConfig() { final ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default"); return root.getConfig( ComponentsConfig.class, cluster.getContainers().get(0).getConfigId()); } private ComponentsConfig clusterComponentsConfig() { return componentsConfig(); } }
INFO for each GET request is a bit verbose. Also, isn't this in the access log?
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { log.log(LogLevel.INFO, "Generating identity document for " + hostname); return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument [%s]", e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
log.log(LogLevel.INFO, "Generating identity document for " + hostname);
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument for '%s': %s", hostname, e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
this should contain the hostname
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { log.log(LogLevel.INFO, "Generating identity document for " + hostname); return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument [%s]", e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
String message = String.format("Unable to generate identity doument [%s]", e.getMessage());
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument for '%s': %s", hostname, e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
Agree, this can be removed.
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { log.log(LogLevel.INFO, "Generating identity document for " + hostname); return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument [%s]", e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
log.log(LogLevel.INFO, "Generating identity document for " + hostname);
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument for '%s': %s", hostname, e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
Good point, will fix!
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { log.log(LogLevel.INFO, "Generating identity document for " + hostname); return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument [%s]", e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
String message = String.format("Unable to generate identity doument [%s]", e.getMessage());
public SignedIdentityDocument getIdentityDocument(@QueryParam("hostname") String hostname) { if (hostname == null) { throw new BadRequestException("The 'hostname' query parameter is missing"); } try { return identityDocumentGenerator.generateSignedIdentityDocument(hostname); } catch (Exception e) { String message = String.format("Unable to generate identity doument for '%s': %s", hostname, e.getMessage()); log.log(LogLevel.ERROR, message, e); throw new InternalServerErrorException(message, e); } }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
class IdentityDocumentResource { private static final Logger log = Logger.getLogger(IdentityDocumentResource.class.getName()); private final IdentityDocumentGenerator identityDocumentGenerator; @Inject public IdentityDocumentResource(@Component AthenzProviderServiceConfig config, @Component Zone zone, @Component NodeRepository nodeRepository, @Component KeyProvider keyProvider) { AthenzProviderServiceConfig.Zones zoneConfig = getZoneConfig(config, zone); this.identityDocumentGenerator = new IdentityDocumentGenerator(config, zoneConfig, nodeRepository, zone, keyProvider); } @GET @Produces(MediaType.APPLICATION_JSON) }
typo: DEFAILT -> DEFAULT
public SignedIdentityDocument generateSignedIdentityDocument(String hostname) { Node node = nodeRepository.getNode(hostname).orElseThrow(() -> new RuntimeException("Unable to find node " + hostname)); try { IdentityDocument identityDocument = generateIdDocument(node); String identityDocumentString = Utils.getMapper().writeValueAsString(identityDocument); String encodedIdentityDocument = Base64.getEncoder().encodeToString(identityDocumentString.getBytes()); Signature sigGenerator = Signature.getInstance("SHA512withRSA"); PrivateKey privateKey = keyProvider.getPrivateKey(signingSecretVersion); sigGenerator.initSign(privateKey); sigGenerator.update(encodedIdentityDocument.getBytes()); String signature = Base64.getEncoder().encodeToString(sigGenerator.sign()); return new SignedIdentityDocument( encodedIdentityDocument, signature, SignedIdentityDocument.DEFAULT_KEY_VERSION, identityDocument.providerUniqueId.asString(), toZoneDnsSuffix(zone, dnsSuffix), providerDomain + "." + providerService, ztsUrl, SignedIdentityDocument.DEFAILT_DOCUMENT_VERSION); } catch (Exception e) { throw new RuntimeException("Exception generating identity document: " + e.getMessage(), e); } }
SignedIdentityDocument.DEFAILT_DOCUMENT_VERSION);
public SignedIdentityDocument generateSignedIdentityDocument(String hostname) { Node node = nodeRepository.getNode(hostname).orElseThrow(() -> new RuntimeException("Unable to find node " + hostname)); try { IdentityDocument identityDocument = generateIdDocument(node); String identityDocumentString = Utils.getMapper().writeValueAsString(identityDocument); String encodedIdentityDocument = Base64.getEncoder().encodeToString(identityDocumentString.getBytes()); Signature sigGenerator = Signature.getInstance("SHA512withRSA"); PrivateKey privateKey = keyProvider.getPrivateKey(signingSecretVersion); sigGenerator.initSign(privateKey); sigGenerator.update(encodedIdentityDocument.getBytes()); String signature = Base64.getEncoder().encodeToString(sigGenerator.sign()); return new SignedIdentityDocument( encodedIdentityDocument, signature, SignedIdentityDocument.DEFAULT_KEY_VERSION, identityDocument.providerUniqueId.asString(), toZoneDnsSuffix(zone, dnsSuffix), providerDomain + "." + providerService, ztsUrl, SignedIdentityDocument.DEFAULT_DOCUMENT_VERSION); } catch (Exception e) { throw new RuntimeException("Exception generating identity document: " + e.getMessage(), e); } }
class IdentityDocumentGenerator { private final NodeRepository nodeRepository; private final Zone zone; private final KeyProvider keyProvider; private final String dnsSuffix; private final String providerService; private final String ztsUrl; private final String providerDomain; private final int signingSecretVersion; public IdentityDocumentGenerator(AthenzProviderServiceConfig config, AthenzProviderServiceConfig.Zones zoneConfig, NodeRepository nodeRepository, Zone zone, KeyProvider keyProvider) { this.nodeRepository = nodeRepository; this.zone = zone; this.keyProvider = keyProvider; this.dnsSuffix = config.certDnsSuffix(); this.providerService = zoneConfig.serviceName(); this.ztsUrl = config.ztsUrl(); this.providerDomain = zoneConfig.domain(); this.signingSecretVersion = zoneConfig.secretVersion(); } private IdentityDocument generateIdDocument(Node node) { Allocation allocation = node.allocation().orElseThrow(() -> new RuntimeException("No allocation for node " + node.hostname())); ProviderUniqueId providerUniqueId = new ProviderUniqueId( allocation.owner().tenant().value(), allocation.owner().application().value(), zone.environment().value(), zone.region().value(), allocation.owner().instance().value(), allocation.membership().cluster().id().value(), allocation.membership().index()); return new IdentityDocument( providerUniqueId, "localhost", node.hostname(), Instant.now()); } private static String toZoneDnsSuffix(Zone zone, String dnsSuffix) { return zone.environment().value() + "-" + zone.region().value() + "." + dnsSuffix; } }
/**
 * Creates identity documents for allocated nodes, capturing the owning tenant,
 * application, instance, cluster and this zone's environment/region.
 */
class IdentityDocumentGenerator {

    private final NodeRepository nodeRepository;
    private final Zone zone;
    private final KeyProvider keyProvider;
    private final String dnsSuffix;
    private final String providerService;
    private final String ztsUrl;
    private final String providerDomain;
    private final int signingSecretVersion;

    public IdentityDocumentGenerator(AthenzProviderServiceConfig config,
                                     AthenzProviderServiceConfig.Zones zoneConfig,
                                     NodeRepository nodeRepository,
                                     Zone zone,
                                     KeyProvider keyProvider) {
        this.nodeRepository = nodeRepository;
        this.zone = zone;
        this.keyProvider = keyProvider;
        // System-wide values from the main config, per-zone values from the zone config
        this.dnsSuffix = config.certDnsSuffix();
        this.ztsUrl = config.ztsUrl();
        this.providerService = zoneConfig.serviceName();
        this.providerDomain = zoneConfig.domain();
        this.signingSecretVersion = zoneConfig.secretVersion();
    }

    /** Creates the unsigned identity document describing the given node's allocation. */
    private IdentityDocument generateIdDocument(Node node) {
        Allocation allocation = node.allocation()
                .orElseThrow(() -> new RuntimeException("No allocation for node " + node.hostname()));
        ProviderUniqueId uniqueId = new ProviderUniqueId(
                allocation.owner().tenant().value(),
                allocation.owner().application().value(),
                zone.environment().value(),
                zone.region().value(),
                allocation.owner().instance().value(),
                allocation.membership().cluster().id().value(),
                allocation.membership().index());
        return new IdentityDocument(
                uniqueId,
                "localhost",
                node.hostname(),
                Instant.now());
    }

    /** Joins environment, region and the configured DNS suffix into the zone DNS suffix. */
    private static String toZoneDnsSuffix(Zone zone, String dnsSuffix) {
        return zone.environment().value() + "-" + zone.region().value() + "." + dnsSuffix;
    }

}
Same as above.
/**
 * Returns the application revision a deployment from the given job should use,
 * or empty for the component job, which has no target zone.
 */
private Optional<ApplicationRevision> deployRevisionFor(DeploymentJobs.JobType jobType, Controller controller) {
    if (jobType == JobType.component)
        return Optional.empty();
    return deployRevisionIn(jobType.zone(controller.system()).get());
}
: deployRevisionIn(jobType.zone(controller.system()).get());
/**
 * Determines the revision to deploy for the given job type.
 * The component job does not deploy to a zone, so it has no revision.
 */
private Optional<ApplicationRevision> deployRevisionFor(DeploymentJobs.JobType jobType, Controller controller) {
    if (jobType == JobType.component)
        return Optional.empty();
    return deployRevisionIn(jobType.zone(controller.system()).get());
}
class LockedApplication extends Application { private final Lock lock; /** * LockedApplication should be acquired through ApplicationController and never constructed directly * * @param application Application instance for which lock has been acquired * @param lock Unused, but must be held when constructing this */ LockedApplication(Application application, Lock lock) { super(application.id(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.deploying(), application.hasOutstandingChange()); this.lock = Objects.requireNonNull(lock, "lock cannot be null"); } public LockedApplication withProjectId(long projectId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withProjectId(projectId), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(IssueId issueId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().with(issueId), deploying(), hasOutstandingChange()), lock); } public LockedApplication withJobCompletion(DeploymentJobs.JobReport report, Instant notificationTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withCompletion(report, notificationTime, controller), deploying(), hasOutstandingChange()), lock); } public LockedApplication withJobTriggering(DeploymentJobs.JobType type, Optional<Change> change, String reason, Instant triggerTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withTriggering(type, change, deployVersionFor(type, controller), deployRevisionFor(type, controller), reason, triggerTime), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(Deployment deployment) { 
Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.put(deployment.zone(), deployment); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(DeploymentJobs deploymentJobs) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange()), lock); } public LockedApplication withoutDeploymentIn(Zone zone) { Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.remove(zone); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication withoutDeploymentJob(DeploymentJobs.JobType jobType) { DeploymentJobs deploymentJobs = deploymentJobs().without(jobType); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange()), lock); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(new Application(id(), deploymentSpec, validationOverrides(), deployments(), deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides, deployments(), deploymentJobs(), deploying(), hasOutstandingChange()), lock); } public LockedApplication withDeploying(Optional<Change> deploying) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying, hasOutstandingChange()), lock); } public LockedApplication withOutstandingChange(boolean outstandingChange) { return new LockedApplication(new 
Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying(), outstandingChange), lock); } private Version deployVersionFor(DeploymentJobs.JobType jobType, Controller controller) { return jobType == JobType.component ? controller.systemVersion() : deployVersionIn(jobType.zone(controller.system()).get(), controller); } /** Returns the revision a new deployment to this zone should use for this application, or empty if we don't know */ private Optional<ApplicationRevision> deployRevisionIn(Zone zone) { if (deploying().isPresent() && deploying().get() instanceof ApplicationChange) return ((Change.ApplicationChange) deploying().get()).revision(); return revisionIn(zone); } /** Returns the revision this application is or should be deployed with in the given zone, or empty if unknown. */ private Optional<ApplicationRevision> revisionIn(Zone zone) { return Optional.ofNullable(deployments().get(zone)).map(Deployment::revision); } }
class LockedApplication extends Application { private final Lock lock; /** * LockedApplication should be acquired through ApplicationController and never constructed directly * * @param application Application instance for which lock has been acquired * @param lock Unused, but must be held when constructing this */ LockedApplication(Application application, Lock lock) { super(application.id(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.deploying(), application.hasOutstandingChange(), application.ownershipIssueId()); this.lock = Objects.requireNonNull(lock, "lock cannot be null"); } public LockedApplication withProjectId(long projectId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withProjectId(projectId), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(IssueId issueId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().with(issueId), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withJobCompletion(DeploymentJobs.JobReport report, Instant notificationTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withCompletion(report, notificationTime, controller), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withJobTriggering(DeploymentJobs.JobType type, Optional<Change> change, String reason, Instant triggerTime, Controller controller) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs().withTriggering(type, change, deployVersionFor(type, controller), deployRevisionFor(type, controller), reason, triggerTime), deploying(), 
hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(Deployment deployment) { Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.put(deployment.zone(), deployment); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(DeploymentJobs deploymentJobs) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withoutDeploymentIn(Zone zone) { Map<Zone, Deployment> deployments = new LinkedHashMap<>(deployments()); deployments.remove(zone); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments, deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withoutDeploymentJob(DeploymentJobs.JobType jobType) { DeploymentJobs deploymentJobs = deploymentJobs().without(jobType); return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs, deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(new Application(id(), deploymentSpec, validationOverrides(), deployments(), deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides, deployments(), deploymentJobs(), deploying(), hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withDeploying(Optional<Change> deploying) { return new LockedApplication(new Application(id(), 
deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying, hasOutstandingChange(), ownershipIssueId()), lock); } public LockedApplication withOutstandingChange(boolean outstandingChange) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying(), outstandingChange, ownershipIssueId()), lock); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(new Application(id(), deploymentSpec(), validationOverrides(), deployments(), deploymentJobs(), deploying(), hasOutstandingChange(), Optional.of(issueId)), lock); } private Version deployVersionFor(DeploymentJobs.JobType jobType, Controller controller) { return jobType == JobType.component ? controller.systemVersion() : deployVersionIn(jobType.zone(controller.system()).get(), controller); } /** Returns the revision a new deployment to this zone should use for this application, or empty if we don't know */ private Optional<ApplicationRevision> deployRevisionIn(Zone zone) { if (deploying().isPresent() && deploying().get() instanceof ApplicationChange) return ((Change.ApplicationChange) deploying().get()).revision(); return revisionIn(zone); } /** Returns the revision this application is or should be deployed with in the given zone, or empty if unknown. */ private Optional<ApplicationRevision> revisionIn(Zone zone) { return Optional.ofNullable(deployments().get(zone)).map(Deployment::revision); } }
The `zone.isPresent()` check seemed redundant when `&&`-ed with `jobType.isProduction()`, so it was removed.
/**
 * Returns whether the given application should run the given job:
 * non-production jobs always apply, production jobs only when the
 * deployment spec includes their environment and region.
 */
private boolean hasJob(JobType jobType, Application application) {
    return ! jobType.isProduction()
           || application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system()));
}
return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system()));
/**
 * Decides whether this job type applies to the application. Every
 * non-production job applies; a production job applies only if the
 * deployment spec lists its environment/region.
 */
private boolean hasJob(JobType jobType, Application application) {
    return ! jobType.isProduction()
           || application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system()));
}
/**
 * Decides when deployment jobs should run for an application and enqueues them on the
 * build system: on job completion, on explicit change start/cancel, and periodically
 * when ready jobs are found. All application mutation happens under the application lock.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // The main system gets a far longer job timeout than other systems
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (report.jobType() == JobType.component) {
                    if (acceptNewRevisionNow(application)) {
                        // Set this as the change to deploy, unless a version change is already underway
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else { // postpone the revision change: record it as outstanding and stop here
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // change completed in all production zones
                    application = application.withDeploying(Optional.empty());
                }
            }

            // Trigger the next step in the pipeline on success, or retry on (some) failures
            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (retryBecauseOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (retryBecauseNewFailure(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            // Re-acquire each application under its lock; it may have been removed since listing
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // application removed
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs =  order.jobsFrom(application.deploymentSpec());

        // Should the first step be triggered? Only system test can be the first job in the pipeline.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
            JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (application.deploying().get() instanceof Change.VersionChange) {
                Version target = ((Change.VersionChange) application.deploying().get()).version();
                // Trigger system test unless it already succeeded on the target version
                if (systemTestStatus == null
                    || ! systemTestStatus.lastTriggered().isPresent()
                    || ! systemTestStatus.isSuccess()
                    || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                    application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                    controller.applications().store(application);
                }
            }
            else {
                JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
                if (changesAvailable(application, componentStatus, systemTestStatus)) {
                    application = trigger(JobType.systemTest, application, false, "Available change in component");
                    controller.applications().store(application);
                }
            }
        }

        // For each completed job in the pipeline, trigger the successors that have changes available
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // never run: not a candidate for driving successors
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();

        if ( ! previous.lastSuccess().isPresent()) return false;

        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            // Previous must have succeeded on exactly the target version
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) )
                return false;
            // Never trigger a job whose production zone is already on a newer version (no downgrades)
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false;
        }

        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        // Changes exist if previous succeeded with a different revision or version than next did
        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        if (previousSuccess.revision().isPresent() &&  ! previousSuccess.revision().equals(nextSuccess.revision()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            // A failing change may be replaced; a healthy in-progress change may not
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean retryBecauseNewFailure(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() assumes a failing run is recorded whenever status exists here;
        // holds when called right after a failure report — confirm for other call paths
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false;
        // Retry out-of-capacity failures only within the first 15 minutes
        return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in turn; production jobs are skipped while one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application; // previous was last job
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
                               reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (application.deploying().isPresent()) {
            // Respect change-blocking windows in the deployment spec for production jobs
            if (jobType.isProduction() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false;
            // Never downgrade a production zone that is already on a newer version
            if (application.deploying().get() instanceof VersionChange
                && isOnNewerVersionInProductionThan(((VersionChange) application.deploying().get()).version(), application, jobType)) return false;
        }
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! hasJob(jobType, application)) return false;
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        return true;
    }

    /** Returns whether any production job of this application is currently running (within the timeout limit). */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job targets a production-environment zone in this system. */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // arbitrary
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a newly completed revision may become the deploying change right now. */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        // Newer revision supersedes the one currently deploying
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true;
        // The current change is failing; allow the new revision in, as it may fix the problem
        if ( application.deploymentJobs().hasFailures()) return true;
        // Outside all change windows: accept now rather than holding the revision indefinitely
        if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
             || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true;
        return false;
    }

}
/**
 * Reacts to job completions and triggers the next deployment jobs for applications.
 * All mutations of an application happen under that application's lock and must be
 * stored back via applications().store(...).
 *
 * NOTE(review): in this excerpt the javadoc for hasJob appears below without a method
 * body, although allowedTriggering calls hasJob — confirm against the full file.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * @param controller gives access to applications and system configuration; not null
     * @param curator backing store for the polled build system; not null
     * @param clock time source (injected for testability); not null
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs in the main system get a longer leash (12h vs 1h) before being considered hung.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (report.jobType() == JobType.component) {
                    if (acceptNewRevisionNow(application)) {
                        // Begin deploying the new revision, unless a version change is already rolling out.
                        if ( ! ( application.deploying().isPresent()
                                 && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Not accepting the revision now: remember it as outstanding and stop here.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The current change has reached all production zones; clear it.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (retryBecauseOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (retryBecauseNewFailure(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // presumably removed since listing — skip
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // Special-case the first job (system test): trigger it when the current change has not
        // yet been successfully tested at the target version / with the available component change.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
            JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (application.deploying().get() instanceof Change.VersionChange) {
                Version target = ((Change.VersionChange) application.deploying().get()).version();
                if (systemTestStatus == null
                    || ! systemTestStatus.lastTriggered().isPresent()
                    || ! systemTestStatus.isSuccess()
                    || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                    application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                    controller.applications().store(application);
                }
            }
            else {
                JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
                if (changesAvailable(application, componentStatus, systemTestStatus)) {
                    application = trigger(JobType.systemTest, application, false, "Available change in component");
                    controller.applications().store(application);
                }
            }
        }

        // For every job in deployment order, trigger its successors when they lag behind it.
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has no recorded status yet
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();
        if ( ! previous.lastSuccess().isPresent()) return false;
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            // The previous job must have succeeded at exactly the target version.
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) )
                return false;
            // Never propagate a version older than what is already deployed in next's zone.
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false;
        }
        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;
        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange
                                   ? "Upgrading to " + ((Change.VersionChange)change).version()
                                   : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean retryBecauseNewFailure(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() assumes a failing run is recorded whenever
        // jobStatus is non-null here — confirm, otherwise this can throw.
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false;
        // Only retry for 15 minutes after the capacity failure first appeared.
        return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    // NOTE(review): the method this javadoc describes (hasJob) has no body in this excerpt,
    // although allowedTriggering below calls it — confirm against the full file.

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers the given list of jobs, unless one of them is a production job and one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested",
                                      jobType, application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        // Production jobs must respect change block windows declared in the deployment spec.
        if (jobType.isProduction() && application.deploying().isPresent()
            && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false;
        // Never trigger a version which is older than what is already deployed in this job's zone.
        if (application.deploying().isPresent()
            && application.deploying().get() instanceof VersionChange
            && isOnNewerVersionInProductionThan(((VersionChange) application.deploying().get()).version(),
                                                application, jobType)) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! hasJob(jobType, application)) return false;
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        return true;
    }

    /** Returns whether any production job for this application is currently (non-timed-out) running. */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! job.isProduction()) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether a new application revision should start rolling out immediately. */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        // An ongoing application change may be replaced by the newer revision.
        if (application.deploying().get() instanceof Change.ApplicationChange) return true;
        if (application.deploymentJobs().hasFailures()) return true;
        if (application.isBlocked(clock.instant())) return true; // presumably blocked anyway — verify isBlocked semantics
        return false;
    }

}
I'm not sure this restructuring actually improved readability — the nested form may be no clearer than the flat chain of checks.
/** Returns true if the given proposed job triggering should be effected. */
private boolean allowedTriggering(JobType jobType, LockedApplication application) {
    if (application.deploying().isPresent()) {
        Change current = application.deploying().get();
        // Production jobs must respect change block windows in the deployment spec.
        if (jobType.isProduction() && current.blockedBy(application.deploymentSpec(), clock.instant()))
            return false;
        // Never trigger a version older than what is already deployed in this job's zone.
        if (current instanceof VersionChange
            && isOnNewerVersionInProductionThan(((VersionChange) current).version(), application, jobType))
            return false;
    }
    // Otherwise: allowed only when the job is idle, declared in the spec, and the project is known.
    return ! application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())
           && hasJob(jobType, application)
           && application.deploymentJobs().projectId().isPresent();
}
}
/** Returns true if the given proposed job triggering should be effected. */
private boolean allowedTriggering(JobType jobType, LockedApplication application) {
    // A change block window in the deployment spec vetoes production triggering.
    boolean inBlockWindow = jobType.isProduction()
                            && application.deploying().isPresent()
                            && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant());
    if (inBlockWindow)
        return false;

    // Refuse version changes which are older than what this job's zone already runs.
    boolean wouldDowngrade = application.deploying().isPresent()
                             && application.deploying().get() instanceof VersionChange
                             && isOnNewerVersionInProductionThan(((VersionChange) application.deploying().get()).version(),
                                                                 application, jobType);
    if (wouldDowngrade)
        return false;

    if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit()))
        return false;
    if ( ! hasJob(jobType, application))
        return false;
    if ( ! application.deploymentJobs().projectId().isPresent())
        return false;
    return true;
}
/**
 * Reacts to job completions and triggers the next deployment jobs for applications.
 * All mutations of an application happen under that application's lock and must be
 * stored back via applications().store(...).
 *
 * NOTE(review): in this excerpt, triggerAllowParallel calls allowedTriggering but that
 * method's body is not present here — confirm against the full file.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * @param controller gives access to applications and system configuration; not null
     * @param curator backing store for the polled build system; not null
     * @param clock time source (injected for testability); not null
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs in the main system get a longer leash (12h vs 1h) before being considered hung.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (report.jobType() == JobType.component) {
                    if (acceptNewRevisionNow(application)) {
                        // Begin deploying the new revision, unless a version change is already rolling out.
                        if ( ! ( application.deploying().isPresent()
                                 && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Not accepting the revision now: remember it as outstanding and stop here.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The current change has reached all production zones; clear it.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (retryBecauseOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (retryBecauseNewFailure(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue; // presumably removed since listing — skip
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // Special-case the first job (system test): trigger it when the current change has not
        // yet been successfully tested at the target version / with the available component change.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
            JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (application.deploying().get() instanceof Change.VersionChange) {
                Version target = ((Change.VersionChange) application.deploying().get()).version();
                if (systemTestStatus == null
                    || ! systemTestStatus.lastTriggered().isPresent()
                    || ! systemTestStatus.isSuccess()
                    || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                    application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                    controller.applications().store(application);
                }
            }
            else {
                JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
                if (changesAvailable(application, componentStatus, systemTestStatus)) {
                    application = trigger(JobType.systemTest, application, false, "Available change in component");
                    controller.applications().store(application);
                }
            }
        }

        // For every job in deployment order, trigger its successors when they lag behind it.
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has no recorded status yet
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();
        if ( ! previous.lastSuccess().isPresent()) return false;
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            // The previous job must have succeeded at exactly the target version.
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) )
                return false;
            // Never propagate a version older than what is already deployed in next's zone.
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false;
        }
        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;
        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange
                                   ? "Upgrading to " + ((Change.VersionChange)change).version()
                                   : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean retryBecauseNewFailure(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() assumes a failing run is recorded whenever
        // jobStatus is non-null here — confirm, otherwise this can throw.
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false;
        // Only retry for 15 minutes after the capacity failure first appeared.
        return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean hasJob(JobType jobType, Application application) {
        if ( ! jobType.isProduction()) return true; // test jobs are always implied
        return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system()));
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers the given list of jobs, unless one of them is a production job and one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested",
                                      jobType, application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    // NOTE(review): the method this javadoc describes (allowedTriggering) is absent from this
    // excerpt, though triggerAllowParallel above calls it — confirm against the full file.
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job maps to a zone in the prod environment in this system. */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        return zone.get().environment() == Environment.prod;
    }

    /** Returns whether a new application revision should start rolling out immediately. */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        // An ongoing application change may be replaced by the newer revision.
        if ( application.deploying().get() instanceof Change.ApplicationChange) return true;
        if ( application.deploymentJobs().hasFailures()) return true;
        // Accept immediately when a block window prevents either upgrades or revision changes
        // right now — presumably to avoid queueing behind a blocked change; TODO confirm intent.
        if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
             || ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true;
        return false;
    }

}
/**
 * Drives the deployment-job pipeline for all applications: reacts to completed jobs,
 * decides which follow-up jobs should run, and starts or cancels changes.
 *
 * All mutation happens under the per-application lock and operates on LockedApplication.
 *
 * NOTE(review): triggerAllowParallel calls allowedTriggering(...), which is not defined
 * within this visible span — presumably defined elsewhere in the file; verify.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock; // injected so time-dependent decisions are testable
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a trigger backed by the given controller and curator-based build system.
     *
     * @throws NullPointerException if any argument is null
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs in the main system get a much longer grace period before being considered hung.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (report.jobType() == JobType.component) {
                    if (acceptNewRevisionNow(application)) {
                        // Start deploying the new revision, unless a version change is already under way.
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Remember the new revision as outstanding instead of deploying it now.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The current change rolled out everywhere it should; clear it.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (retryBecauseOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (retryBecauseNewFailure(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // Re-read under the lock: the application may be gone by now.
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // Special case: (re)trigger the first step of the pipeline when it has not yet run on the current change.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
            JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (application.deploying().get() instanceof Change.VersionChange) {
                Version target = ((Change.VersionChange) application.deploying().get()).version();
                // Trigger unless system test has already been triggered, and succeeded, on the target version.
                if (systemTestStatus == null
                    || ! systemTestStatus.lastTriggered().isPresent()
                    || ! systemTestStatus.isSuccess()
                    || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                    application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                    controller.applications().store(application);
                }
            }
            else {
                JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
                if (changesAvailable(application, componentStatus, systemTestStatus)) {
                    application = trigger(JobType.systemTest, application, false, "Available change in component");
                    controller.applications().store(application);
                }
            }
        }

        // General case: for each completed job, trigger its successors which are behind on the change.
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has never run for this application
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();

        if ( ! previous.lastSuccess().isPresent()) return false;

        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            // Previous step must have succeeded on the exact version we are rolling out.
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
            // Never push a version that would downgrade an already-deployed production zone.
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
        }

        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        // Trigger next when it has not yet succeeded on the same revision and version as previous.
        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            // A failing change may be replaced; an otherwise in-flight one may not.
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version()
                                                                          : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id()); // also drop queued-but-unstarted jobs
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean retryBecauseNewFailure(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() assumes a failure is recorded for this job — verify this
        // method is only reached right after a failure report.
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false;
        // Keep retrying within the first 15 minutes of the capacity failure.
        return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean hasJob(JobType jobType, Application application) {
        if ( ! jobType.isProduction()) return true; // the deployment spec only constrains production jobs
        return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system()));
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in order, skipping production jobs when one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns whether any production job for this application is currently running (within the timeout limit). */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! job.isProduction()) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // no zone for this job in this system — TODO confirm when this happens
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether a newly built application revision should start rolling out right away. */
    private boolean acceptNewRevisionNow(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        if (application.deploying().get() instanceof Change.ApplicationChange) return true; // a revision is already rolling; piling on is accepted
        if (application.deploymentJobs().hasFailures()) return true; // a new revision may fix the failing pipeline
        if (application.isBlocked(clock.instant())) return true; // NOTE(review): accepting revisions *because* changes are blocked looks intentional but odd — see review thread
        return false;
    }

}
This seemed a bit strange, so I inlined it to make the strangeness more apparent.
/** Returns whether a newly built application revision should start rolling out right away. */
private boolean acceptNewRevisionNow(LockedApplication application) {
    // Nothing is currently rolling out, so a new revision can start immediately.
    if ( ! application.deploying().isPresent()) return true;

    Change current = application.deploying().get();
    if (current instanceof Change.ApplicationChange) return true; // already rolling a revision; piling on is accepted

    // A failing pipeline may well be fixed by the new revision.
    if (application.deploymentJobs().hasFailures()) return true;

    // Oddly, a closed upgrade or revision window also lets the revision through.
    return ! application.deploymentSpec().canUpgradeAt(clock.instant())
           || ! application.deploymentSpec().canChangeRevisionAt(clock.instant());
}
! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return true;
/** Returns whether a newly built application revision should start rolling out right away. */
private boolean acceptNewRevisionNow(LockedApplication application) {
    if ( ! application.deploying().isPresent()) return true; // idle: accept immediately

    Change current = application.deploying().get();
    return current instanceof Change.ApplicationChange       // a revision is already rolling; piling on is accepted
           || application.deploymentJobs().hasFailures()     // a new revision may fix the failing pipeline
           || application.isBlocked(clock.instant());        // blocked change window still admits the revision
}
/**
 * Drives the deployment-job pipeline for all applications: reacts to completed jobs,
 * decides which follow-up jobs should run, and starts or cancels changes.
 *
 * All mutation happens under the per-application lock and operates on LockedApplication.
 *
 * NOTE(review): triggerFromCompletion calls acceptNewRevisionNow(...), which is not defined
 * within this visible span — presumably defined elsewhere in the file; verify.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock; // injected so time-dependent decisions are testable
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a trigger backed by the given controller and curator-based build system.
     *
     * @throws NullPointerException if any argument is null
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Jobs in the main system get a much longer grace period before being considered hung.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly be the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);

            if (report.success()) {
                if (report.jobType() == JobType.component) {
                    if (acceptNewRevisionNow(application)) {
                        // Start deploying the new revision, unless a version change is already under way.
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Remember the new revision as outstanding instead of deploying it now.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // The current change rolled out everywhere it should; clear it.
                    application = application.withDeploying(Optional.empty());
                }
            }

            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (retryBecauseOutOfCapacity(application, report.jobType()))
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (retryBecauseNewFailure(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");

            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // Re-read under the lock: the application may be gone by now.
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());

        // Special case: (re)trigger the first step of the pipeline when it has not yet run on the current change.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
            JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (application.deploying().get() instanceof Change.VersionChange) {
                Version target = ((Change.VersionChange) application.deploying().get()).version();
                // Trigger unless system test has already been triggered, and succeeded, on the target version.
                if (systemTestStatus == null
                    || ! systemTestStatus.lastTriggered().isPresent()
                    || ! systemTestStatus.isSuccess()
                    || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                    application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                    controller.applications().store(application);
                }
            }
            else {
                JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
                if (changesAvailable(application, componentStatus, systemTestStatus)) {
                    application = trigger(JobType.systemTest, application, false, "Available change in component");
                    controller.applications().store(application);
                }
            }
        }

        // General case: for each completed job, trigger its successors which are behind on the change.
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue; // job has never run for this application
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;

            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();

        if ( ! previous.lastSuccess().isPresent()) return false;

        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            // Previous step must have succeeded on the exact version we are rolling out.
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) ) return false;
            // Never push a version that would downgrade an already-deployed production zone.
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type())) return false;
        }

        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;

        // Trigger next when it has not yet succeeded on the same revision and version as previous.
        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision())) return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version())) return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            // A failing change may be replaced; an otherwise in-flight one may not.
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version()
                                                                          : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id()); // also drop queued-but-unstarted jobs
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean retryBecauseNewFailure(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() assumes a failure is recorded for this job — verify this
        // method is only reached right after a failure report.
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false;
        // Keep retrying within the first 15 minutes of the capacity failure.
        return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean hasJob(JobType jobType, Application application) {
        if ( ! jobType.isProduction()) return true; // the deployment spec only constrains production jobs
        return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system()));
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs in order, skipping production jobs when one is already running. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application,
                                                  boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        if (application.deploying().isPresent()) {
            // The change's own block windows veto production triggering.
            if (jobType.isProduction() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false;
            // Never trigger a job that would downgrade an already-deployed production zone.
            // NOTE(review): VersionChange is referenced unqualified here but as Change.VersionChange elsewhere —
            // presumably a static import; verify.
            if (application.deploying().get() instanceof VersionChange && isOnNewerVersionInProductionThan(((VersionChange) application.deploying().get()).version(), application, jobType)) return false;
        }
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! hasJob(jobType, application)) return false; // not wanted by the deployment spec
        if ( ! application.deploymentJobs().projectId().isPresent()) return false; // no build-system project to run it in
        return true;
    }

    /** Returns whether any production job for this application is currently running (within the timeout limit). */
    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                      .production()
                      .running(jobTimeoutLimit())
                      .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! isProduction(job)) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false; // no zone for this job in this system — TODO confirm when this happens
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

    /** Returns whether the given job targets a zone in the production environment. */
    private boolean isProduction(JobType job) {
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        return zone.get().environment() == Environment.prod;
    }

}
/**
 * Drives the deployment pipeline for applications: decides which CI jobs to
 * trigger next, when to retry failures, and when a change (version upgrade or
 * application revision) is considered complete.
 *
 * All mutations of an application happen under the application lock, and the
 * mutated application *must* be stored before the lock is released.
 */
class DeploymentTrigger {

    /** The max duration a job may run before we consider it dead/hanging */
    private final Duration jobTimeout;

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildSystem buildSystem;
    private final DeploymentOrder order;

    /**
     * Creates a deployment trigger.
     *
     * @param controller the controller owning the applications and system state
     * @param curator backing store for the polled build system queue
     * @param clock time source; injected for testability
     */
    public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
        Objects.requireNonNull(controller,"controller cannot be null");
        Objects.requireNonNull(curator,"curator cannot be null");
        Objects.requireNonNull(clock,"clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.buildSystem = new PolledBuildSystem(controller, curator);
        this.order = new DeploymentOrder(controller);
        // Main (production) system tolerates much longer-running jobs than test systems.
        this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
    }

    /** Returns the time in the past before which jobs are at this moment considered unresponsive */
    public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }

    public BuildSystem buildSystem() { return buildSystem; }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs
     * (which may possibly the same job once over).
     *
     * @param report information about the job that just completed
     */
    public void triggerFromCompletion(JobReport report) {
        try (Lock lock = applications().lock(report.applicationId())) {
            LockedApplication application = applications().require(report.applicationId(), lock);
            application = application.withJobCompletion(report, clock.instant(), controller);
            if (report.success()) {
                if (report.jobType() == JobType.component) {
                    // NOTE(review): acceptNewRevisionNow is not visible in this chunk —
                    // presumably declared elsewhere in this class; verify against the full file.
                    if (acceptNewRevisionNow(application)) {
                        // A new revision is accepted now, unless a version change is already in progress.
                        if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange)))
                            application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
                    }
                    else {
                        // Not accepting the revision now: remember it as outstanding and stop here.
                        applications().store(application.withOutstandingChange(true));
                        return;
                    }
                }
                else if (deploymentComplete(application)) {
                    // Change has been deployed to all production zones: clear it.
                    application = application.withDeploying(Optional.empty());
                }
            }
            if (report.success())
                application = trigger(order.nextAfter(report.jobType(), application), application,
                                      report.jobType().jobName() + " completed");
            else if (retryBecauseOutOfCapacity(application, report.jobType()))
                // Out-of-capacity retries jump to the front of the queue.
                application = trigger(report.jobType(), application, true, "Retrying on out of capacity");
            else if (retryBecauseNewFailure(application, report.jobType()))
                application = trigger(report.jobType(), application, false, "Immediate retry on failure");
            applications().store(application);
        }
    }

    /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */
    private boolean deploymentComplete(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return true;
        return order.jobsFrom(application.deploymentSpec()).stream()
                    .filter(JobType::isProduction)
                    .allMatch(jobType -> application.deploymentJobs().isSuccessful(application.deploying().get(), jobType));
    }

    /**
     * Find jobs that can and should run but are currently not.
     */
    public void triggerReadyJobs() {
        ApplicationList applications = ApplicationList.from(applications().asList());
        applications = applications.notPullRequest();
        for (Application application : applications.asList()) {
            try (Lock lock = applications().lock(application.id())) {
                // The application may have been deleted between listing and locking.
                Optional<LockedApplication> lockedApplication = controller.applications().get(application.id(), lock);
                if ( ! lockedApplication.isPresent()) continue;
                triggerReadyJobs(lockedApplication.get());
            }
        }
    }

    /** Find the next step to trigger if any, and triggers it */
    private void triggerReadyJobs(LockedApplication application) {
        if ( ! application.deploying().isPresent()) return;
        List<JobType> jobs = order.jobsFrom(application.deploymentSpec());
        // Special case the first job of the pipeline (system test): it is triggered directly
        // from the deploying change rather than from a predecessor job.
        if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) {
            JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest);
            if (application.deploying().get() instanceof Change.VersionChange) {
                Version target = ((Change.VersionChange) application.deploying().get()).version();
                // Re-trigger unless system test already succeeded on exactly this target version.
                if (systemTestStatus == null
                    || ! systemTestStatus.lastTriggered().isPresent()
                    || ! systemTestStatus.isSuccess()
                    || ! systemTestStatus.lastTriggered().get().version().equals(target)) {
                    application = trigger(JobType.systemTest, application, false, "Upgrade to " + target);
                    controller.applications().store(application);
                }
            }
            else {
                JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component);
                if (changesAvailable(application, componentStatus, systemTestStatus)) {
                    application = trigger(JobType.systemTest, application, false, "Available change in component");
                    controller.applications().store(application);
                }
            }
        }
        // For every later job: trigger its successors when they lag behind it.
        for (JobType jobType : jobs) {
            JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
            if (jobStatus == null) continue;
            if (jobStatus.isRunning(jobTimeoutLimit())) continue;
            List<JobType> nextToTrigger = new ArrayList<>();
            for (JobType nextJobType : order.nextAfter(jobType, application)) {
                JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
                if (changesAvailable(application, jobStatus, nextStatus))
                    nextToTrigger.add(nextJobType);
            }
            application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName());
            controller.applications().store(application);
        }
    }

    /**
     * Returns true if the previous job has completed successfully with a revision and/or version which is
     * newer (different) than the one last completed successfully in next
     */
    private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) {
        if ( ! application.deploying().isPresent()) return false;
        Change change = application.deploying().get();
        if ( ! previous.lastSuccess().isPresent()) return false;
        if (change instanceof Change.VersionChange) {
            Version targetVersion = ((Change.VersionChange)change).version();
            // Only propagate a success that was on the version we are actually deploying.
            if ( ! (targetVersion.equals(previous.lastSuccess().get().version())) )
                return false;
            // Never trigger a downgrade of a production zone.
            if (next != null && isOnNewerVersionInProductionThan(targetVersion, application, next.type()))
                return false;
        }
        if (next == null) return true;
        if ( ! next.lastSuccess().isPresent()) return true;
        JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
        JobStatus.JobRun nextSuccess = next.lastSuccess().get();
        if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().equals(nextSuccess.revision()))
            return true;
        if ( ! previousSuccess.version().equals(nextSuccess.version()))
            return true;
        return false;
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already have an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            // A failing change may be replaced; a healthy in-progress change may not.
            if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": "
                                                   + application.deploying().get() + " is already in progress");
            application = application.withDeploying(Optional.of(change));
            if (change instanceof Change.ApplicationChange)
                application = application.withOutstandingChange(false);
            application = trigger(JobType.systemTest, application, false,
                                  (change instanceof Change.VersionChange
                                   ? "Upgrading to " + ((Change.VersionChange)change).version()
                                   : "Deploying " + change));
            applications().store(application);
        }
    }

    /**
     * Cancels any ongoing upgrade of the given application
     *
     * @param applicationId the application to trigger
     */
    public void cancelChange(ApplicationId applicationId) {
        try (Lock lock = applications().lock(applicationId)) {
            LockedApplication application = applications().require(applicationId, lock);
            buildSystem.removeJobs(application.id());
            application = application.withDeploying(Optional.empty());
            applications().store(application);
        }
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Retry immediately only if this job just started failing. Otherwise retry periodically */
    private boolean retryBecauseNewFailure(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        // NOTE(review): firstFailing().get() assumes a failure is recorded when this is called — verify.
        return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10))));
    }

    /** Decide whether to retry due to capacity restrictions */
    private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) {
        JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
        if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false;
        // Keep retrying for 15 minutes after the capacity failure first appeared.
        return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15)));
    }

    /** Returns whether the given job type should be triggered according to deployment spec */
    private boolean hasJob(JobType jobType, Application application) {
        if ( ! jobType.isProduction()) return true; // test jobs are always included
        return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system()));
    }

    /**
     * Trigger a job for an application
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to put the job at the front of the build system queue (or the back)
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, which *must* be stored by the caller
     */
    private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) {
        // At most one production job runs at a time.
        if (jobType.isProduction() && isRunningProductionJob(application)) return application;
        return triggerAllowParallel(jobType, application, first, false, reason);
    }

    /** Triggers each of the given jobs, subject to the single-production-job rule. */
    private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) {
        if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application))
            return application;
        for (JobType job : jobs)
            application = triggerAllowParallel(job, application, false, false, reason);
        return application;
    }

    /**
     * Trigger a job for an application, if allowed
     *
     * @param jobType the type of the job to trigger, or null to trigger nothing
     * @param application the application to trigger the job for
     * @param first whether to trigger the job before other jobs
     * @param force true to disable checks which should normally prevent this triggering from happening
     * @param reason describes why the job is triggered
     * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
     */
    public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) {
        if (jobType == null) return application;
        if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
            log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
                                      application, reason));
            return application;
        }
        if ( ! force && ! allowedTriggering(jobType, application)) return application;
        log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
                               application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason));
        buildSystem.addJob(application.id(), jobType, first);
        return application.withJobTriggering(jobType, application.deploying(), reason, clock.instant(), controller);
    }

    /** Returns true if the given proposed job triggering should be effected */
    private boolean allowedTriggering(JobType jobType, LockedApplication application) {
        // Respect the deployment spec's change-blocking windows for production jobs.
        if (jobType.isProduction() && application.deploying().isPresent()
            && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false;
        // Never trigger a job that would downgrade an already-deployed production zone.
        if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange
            && isOnNewerVersionInProductionThan(((VersionChange) application.deploying().get()).version(), application, jobType)) return false;
        if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false;
        if ( ! hasJob(jobType, application)) return false;
        if ( ! application.deploymentJobs().projectId().isPresent()) return false;
        return true;
    }

    private boolean isRunningProductionJob(Application application) {
        return JobList.from(application)
                .production()
                .running(jobTimeoutLimit())
                .anyMatch();
    }

    /**
     * Returns whether the current deployed version in the zone given by the job
     * is newer than the given version. This may be the case even if the production job
     * in question failed, if the failure happens after deployment.
     * In that case we should never deploy an earlier version as that may potentially
     * downgrade production nodes which we are not guaranteed to support.
     */
    private boolean isOnNewerVersionInProductionThan(Version version, Application application, JobType job) {
        if ( ! job.isProduction()) return false;
        Optional<Zone> zone = job.zone(controller.system());
        if ( ! zone.isPresent()) return false;
        Deployment existingDeployment = application.deployments().get(zone.get());
        if (existingDeployment == null) return false;
        return existingDeployment.version().isAfter(version);
    }

}
nice
/**
 * Evaluates a comparison where at least one operand is null.
 * Only (in)equality has a defined outcome for null operands; any other
 * operator yields an invalid result. Reference comparison is intentional:
 * equality holds exactly when both sides are null.
 *
 * @param lhs left operand (may be null)
 * @param rhs right operand (may be null)
 * @return the boolean result for "==", "=" and "!=", otherwise INVALID
 */
private ResultList evaluateWithAtLeastOneNullSide(Object lhs, Object rhs) {
    switch (operator) {
        case "==":
        case "=":
            return ResultList.fromBoolean(lhs == rhs);
        case "!=":
            return ResultList.fromBoolean(lhs != rhs);
        default:
            return new ResultList(Result.INVALID);
    }
}
if (operator.equals("==") || operator.equals("=")) {
/**
 * Evaluates a comparison where at least one operand is null.
 * Precondition: lhs and/or rhs is null.
 *
 * @param lhs left operand (may be null)
 * @param rhs right operand (may be null)
 * @return the boolean result for "==", "=" and "!=", otherwise INVALID
 */
private ResultList evaluateWithAtLeastOneNullSide(Object lhs, Object rhs) {
    // Reference comparison is intentional: "==" / "=" is true only when both sides are null.
    if (operator.equals("==") || operator.equals("=")) {
        return ResultList.fromBoolean(lhs == rhs);
    } else if (operator.equals("!=")) {
        return ResultList.fromBoolean(lhs != rhs);
    } else {
        // Ordering and regex operators have no defined result for null operands.
        return new ResultList(Result.INVALID);
    }
}
class ComparisonNode implements ExpressionNode { private ExpressionNode lhs, rhs; private String operator; /** * Constructs a new comparison node. * * @param lhs The left-hand-side of the comparison. * @param operator The comparison operator. * @param rhs The right-hand-side of the comparison. */ public ComparisonNode(ExpressionNode lhs, String operator, ExpressionNode rhs) { this.lhs = lhs; this.operator = operator; this.rhs = rhs; } /** * Returns the left hand side of this comparison. * * @return The left hand side expression. */ public ExpressionNode getLHS() { return lhs; } /** * Sets the left hand side of this comparison. * * @param lhs The new left hand side. * @return This, to allow chaining. */ public ComparisonNode setLHS(ExpressionNode lhs) { this.lhs = lhs; return this; } /** * Returns the comparison operator of this. * * @return The operator. */ public String getOperator() { return operator; } /** * Sets the comparison operator of this. * * @param operator The operator string. * @return This, to allow chaining. */ public ComparisonNode setOperator(String operator) { this.operator = operator; return this; } /** * Returns the right hand side of this comparison. * * @return The right hand side expression. */ public ExpressionNode getRHS() { return rhs; } /** * Sets the right hand side of this comparison. * * @param rhs The new right hand side. * @return This, to allow chaining. 
*/ public ComparisonNode setRHS(ExpressionNode rhs) { this.rhs = rhs; return this; } public OrderingSpecification getOrdering(IdNode lhs, LiteralNode rhs, String operator, int order) { if (lhs.getWidthBits() == -1 || lhs.getDivisionBits() == -1 || !(rhs.getValue() instanceof Long)) { return null; } if (operator.equals("==") || operator.equals("=")) { return new OrderingSpecification(order, (Long)rhs.getValue(), lhs.getWidthBits(), lhs.getDivisionBits()); } if (order == OrderingSpecification.ASCENDING) { if ((operator.equals("<") || operator.equals("<="))) { return new OrderingSpecification(order, 0, lhs.getWidthBits(), lhs.getDivisionBits()); } if (operator.equals(">")) { return new OrderingSpecification(order, (Long)rhs.getValue() + 1, lhs.getWidthBits(), lhs.getDivisionBits()); } if (operator.equals(">=")) { return new OrderingSpecification(order, (Long)rhs.getValue(), lhs.getWidthBits(), lhs.getDivisionBits()); } } else { if (operator.equals("<")) { return new OrderingSpecification(order, (Long)rhs.getValue() - 1, lhs.getWidthBits(), lhs.getDivisionBits()); } if (operator.equals("<=")) { return new OrderingSpecification(order, (Long)rhs.getValue(), lhs.getWidthBits(), lhs.getDivisionBits()); } } return null; } public OrderingSpecification getOrdering(int order) { if (lhs instanceof IdNode && rhs instanceof LiteralNode) { return getOrdering((IdNode)lhs, (LiteralNode)rhs, operator, order); } else if (rhs instanceof IdNode && lhs instanceof LiteralNode) { return getOrdering((IdNode)rhs, (LiteralNode)rhs, operator, order); } return null; } public BucketSet getBucketSet(BucketIdFactory factory) { if (operator.equals("==") || operator.equals("=")) { if (lhs instanceof IdNode && rhs instanceof LiteralNode) { return compare(factory, (IdNode)lhs, (LiteralNode)rhs, operator); } else if (rhs instanceof IdNode && lhs instanceof LiteralNode) { return compare(factory, (IdNode)rhs, (LiteralNode)lhs, operator); } else if (lhs instanceof SearchColumnNode && rhs instanceof 
LiteralNode) { return compare(factory, (SearchColumnNode)lhs, (LiteralNode)rhs); } else if (rhs instanceof SearchColumnNode && lhs instanceof LiteralNode) { return compare(factory, (SearchColumnNode)rhs, (LiteralNode)lhs); } } return null; } /** * Compares a search column node with a literal node. * * @param factory The bucket id factory used. * @param node The search column node. * @param literal The literal node to compare to. * @return The bucket set containing the buckets covered. */ private BucketSet compare(BucketIdFactory factory, SearchColumnNode node, LiteralNode literal) { Object value = literal.getValue(); int bucketCount = (int) Math.pow(2, 16); if (value instanceof Long) { BucketSet ret = new BucketSet(); for (int i = 0; i < bucketCount; i++) { BucketId id = new BucketId(16, i); if ((Long)value == node.getDistribution().getColumn(id)) { ret.add(new BucketId(16, i)); } } return ret; } return null; } private BucketSet compare(BucketIdFactory factory, IdNode id, LiteralNode literal, String operator) { String field = id.getField(); Object value = literal.getValue(); if (field == null) { if (value instanceof String) { String name = (String)value; if ((operator.equals("=") && name.contains("*")) || (operator.equals("=~") && ((name.contains("*") || name.contains("?"))))) { return null; } return new BucketSet(factory.getBucketId(new DocumentId(name))); } } else if (field.equalsIgnoreCase("user")) { if (value instanceof Long) { return new BucketSet(new BucketId(factory.getLocationBitCount(), (Long)value)); } } else if (field.equalsIgnoreCase("group")) { if (value instanceof String) { String name = (String)value; if ((operator.equals("=") && name.contains("*")) || (operator.equals("=~") && ((name.contains("*") || name.contains("?"))))) { return null; } return new BucketSet(new BucketId(factory.getLocationBitCount(), new GroupDocIdString("", name, "").getLocation())); } } else if (field.equalsIgnoreCase("bucket")) { if (value instanceof Long) { return new 
BucketSet(new BucketId((Long)value)); } } return null; } public Object evaluate(Context context) { Object oLeft = lhs.evaluate(context); Object oRight = rhs.evaluate(context); if (oLeft == null || oRight == null) { return evaluateWithAtLeastOneNullSide(oLeft, oRight); } if (oLeft == Result.INVALID || oRight == Result.INVALID) { return new ResultList(Result.INVALID); } if (oLeft instanceof AttributeNode.VariableValueList && oRight instanceof AttributeNode.VariableValueList) { if (operator.equals("==")) { return evaluateListsTrue((AttributeNode.VariableValueList)oLeft, (AttributeNode.VariableValueList)oRight); } else if (operator.equals("!=")) { return evaluateListsFalse((AttributeNode.VariableValueList)oLeft, (AttributeNode.VariableValueList)oRight); } else { return new ResultList(Result.INVALID); } } else if (oLeft instanceof AttributeNode.VariableValueList) { return evaluateListAndSingle((AttributeNode.VariableValueList)oLeft, oRight); } else if (oRight instanceof AttributeNode.VariableValueList) { return evaluateListAndSingle((AttributeNode.VariableValueList)oRight, oLeft); } return new ResultList(evaluateBool(oLeft, oRight)); } /** * Evaluates a binary comparison where one or both operands are null. * Boolean outcomes are only defined for (in)equality relations, all others * return Result.INVALID. * * Precondition: lhs AND/OR rhs is null. 
*/ public ResultList evaluateListsTrue(AttributeNode.VariableValueList lhs, AttributeNode.VariableValueList rhs) { if (lhs.size() != rhs.size()) { return new ResultList(Result.FALSE); } for (int i = 0; i < lhs.size(); i++) { if (!lhs.get(i).getVariables().equals(rhs.get(i).getVariables())) { return new ResultList(Result.FALSE); } if (evaluateEquals(lhs.get(i).getValue(), rhs.get(i).getValue()) == Result.FALSE) { return new ResultList(Result.FALSE); } } return new ResultList(Result.TRUE); } public ResultList evaluateListsFalse(AttributeNode.VariableValueList lhs, AttributeNode.VariableValueList rhs) { ResultList lst = evaluateListsTrue(lhs, rhs); if (lst.toResult() == Result.TRUE) { return new ResultList(Result.FALSE); } else if (lst.toResult() == Result.FALSE) { return new ResultList(Result.TRUE); } else { return lst; } } public ResultList evaluateListAndSingle(AttributeNode.VariableValueList lhs, Object rhs) { if (rhs == null && lhs == null) { return new ResultList(Result.TRUE); } if (rhs == null || lhs == null) { return new ResultList(Result.FALSE); } ResultList retVal = new ResultList(); for (int i = 0; i < lhs.size(); i++) { Result result = evaluateBool(lhs.get(i).getValue(), rhs); retVal.add((FieldPathIteratorHandler.VariableMap)lhs.get(i).getVariables().clone(), result); } return retVal; } /** * Evaluate this expression on two operands, given that they are not invalid. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return The evaluation result. 
*/ private Result evaluateBool(Object lhs, Object rhs) { if (operator.equals("==")) { return evaluateEquals(lhs, rhs); } else if (operator.equals("!=")) { return Result.invert(evaluateEquals(lhs, rhs)); } else if (operator.equals("<") || operator.equals("<=") || operator.equals(">") || operator.equals(">=")) { return evaluateNumber(lhs, rhs); } else if (operator.equals("=~") || operator.equals("=")) { return evaluateString(lhs, rhs); } throw new IllegalStateException("Comparison operator '" + operator + "' is not supported."); } /** * Compare two operands for equality. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return Wether or not the two operands are equal. */ private Result evaluateEquals(Object lhs, Object rhs) { if (lhs == null || rhs == null) { return Result.toResult(lhs == rhs); } double a = getAsNumber(lhs); double b = getAsNumber(rhs); if (Double.isNaN(a) || Double.isNaN(b)) { return Result.toResult(lhs.toString().equals(rhs.toString())); } return Result.toResult(a == b); } private double getAsNumber(Object value) { if (value instanceof Number) { return ((Number)value).doubleValue(); } else if (value instanceof NumericFieldValue) { return getAsNumber(((NumericFieldValue)value).getNumber()); } else { return Double.NaN; } } /** * Evalutes the value of this term over a document, given that both operands must be numbers. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return The evaluation result. 
*/ private Result evaluateNumber(Object lhs, Object rhs) { double a = getAsNumber(lhs); double b = getAsNumber(rhs); if (Double.isNaN(a) || Double.isNaN(b)) { return Result.INVALID; } if (operator.equals("<")) { return Result.toResult(a < b); } else if (operator.equals("<=")) { return Result.toResult(a <= b); } else if (operator.equals(">")) { return Result.toResult(a > b); } else { return Result.toResult(a >= b); } } /** * Evalutes the value of this term over a document, given that both operands must be strings. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return The evaluation result. */ private Result evaluateString(Object lhs, Object rhs) { String left = "" + lhs; String right = "" + rhs; if (operator.equals("=~")) { return Result.toResult(Pattern.compile(right).matcher(left).find()); } else { return Result.toResult(Pattern.compile(globToRegex(right)).matcher(left).find()); } } /** * Converts a glob pattern to a corresponding regular expression string. * * @param glob The glob pattern. * @return The regex string. */ private String globToRegex(String glob) { StringBuilder ret = new StringBuilder(); ret.append("^"); for (int i = 0; i < glob.length(); i++) { ret.append(globToRegex(glob.charAt(i))); } ret.append("$"); return ret.toString(); } /** * Converts a single character in a glob expression to the corresponding regular expression string. * * @param glob The glob character. * @return The regex string. */ private String globToRegex(char glob) { switch (glob) { case'*': return ".*"; case'?': return "."; case'^': case'$': case'|': case'{': case'}': case'(': case')': case'[': case']': case'\\': case'+': case'.': return "\\" + glob; default: return "" + glob; } } public void accept(Visitor visitor) { visitor.visit(this); } @Override public String toString() { return lhs + " " + operator + " " + rhs; } }
class ComparisonNode implements ExpressionNode { private ExpressionNode lhs, rhs; private String operator; /** * Constructs a new comparison node. * * @param lhs The left-hand-side of the comparison. * @param operator The comparison operator. * @param rhs The right-hand-side of the comparison. */ public ComparisonNode(ExpressionNode lhs, String operator, ExpressionNode rhs) { this.lhs = lhs; this.operator = operator; this.rhs = rhs; } /** * Returns the left hand side of this comparison. * * @return The left hand side expression. */ public ExpressionNode getLHS() { return lhs; } /** * Sets the left hand side of this comparison. * * @param lhs The new left hand side. * @return This, to allow chaining. */ public ComparisonNode setLHS(ExpressionNode lhs) { this.lhs = lhs; return this; } /** * Returns the comparison operator of this. * * @return The operator. */ public String getOperator() { return operator; } /** * Sets the comparison operator of this. * * @param operator The operator string. * @return This, to allow chaining. */ public ComparisonNode setOperator(String operator) { this.operator = operator; return this; } /** * Returns the right hand side of this comparison. * * @return The right hand side expression. */ public ExpressionNode getRHS() { return rhs; } /** * Sets the right hand side of this comparison. * * @param rhs The new right hand side. * @return This, to allow chaining. 
*/ public ComparisonNode setRHS(ExpressionNode rhs) { this.rhs = rhs; return this; } public OrderingSpecification getOrdering(IdNode lhs, LiteralNode rhs, String operator, int order) { if (lhs.getWidthBits() == -1 || lhs.getDivisionBits() == -1 || !(rhs.getValue() instanceof Long)) { return null; } if (operator.equals("==") || operator.equals("=")) { return new OrderingSpecification(order, (Long)rhs.getValue(), lhs.getWidthBits(), lhs.getDivisionBits()); } if (order == OrderingSpecification.ASCENDING) { if ((operator.equals("<") || operator.equals("<="))) { return new OrderingSpecification(order, 0, lhs.getWidthBits(), lhs.getDivisionBits()); } if (operator.equals(">")) { return new OrderingSpecification(order, (Long)rhs.getValue() + 1, lhs.getWidthBits(), lhs.getDivisionBits()); } if (operator.equals(">=")) { return new OrderingSpecification(order, (Long)rhs.getValue(), lhs.getWidthBits(), lhs.getDivisionBits()); } } else { if (operator.equals("<")) { return new OrderingSpecification(order, (Long)rhs.getValue() - 1, lhs.getWidthBits(), lhs.getDivisionBits()); } if (operator.equals("<=")) { return new OrderingSpecification(order, (Long)rhs.getValue(), lhs.getWidthBits(), lhs.getDivisionBits()); } } return null; } public OrderingSpecification getOrdering(int order) { if (lhs instanceof IdNode && rhs instanceof LiteralNode) { return getOrdering((IdNode)lhs, (LiteralNode)rhs, operator, order); } else if (rhs instanceof IdNode && lhs instanceof LiteralNode) { return getOrdering((IdNode)rhs, (LiteralNode)rhs, operator, order); } return null; } public BucketSet getBucketSet(BucketIdFactory factory) { if (operator.equals("==") || operator.equals("=")) { if (lhs instanceof IdNode && rhs instanceof LiteralNode) { return compare(factory, (IdNode)lhs, (LiteralNode)rhs, operator); } else if (rhs instanceof IdNode && lhs instanceof LiteralNode) { return compare(factory, (IdNode)rhs, (LiteralNode)lhs, operator); } else if (lhs instanceof SearchColumnNode && rhs instanceof 
LiteralNode) { return compare(factory, (SearchColumnNode)lhs, (LiteralNode)rhs); } else if (rhs instanceof SearchColumnNode && lhs instanceof LiteralNode) { return compare(factory, (SearchColumnNode)rhs, (LiteralNode)lhs); } } return null; } /** * Compares a search column node with a literal node. * * @param factory The bucket id factory used. * @param node The search column node. * @param literal The literal node to compare to. * @return The bucket set containing the buckets covered. */ private BucketSet compare(BucketIdFactory factory, SearchColumnNode node, LiteralNode literal) { Object value = literal.getValue(); int bucketCount = (int) Math.pow(2, 16); if (value instanceof Long) { BucketSet ret = new BucketSet(); for (int i = 0; i < bucketCount; i++) { BucketId id = new BucketId(16, i); if ((Long)value == node.getDistribution().getColumn(id)) { ret.add(new BucketId(16, i)); } } return ret; } return null; } private BucketSet compare(BucketIdFactory factory, IdNode id, LiteralNode literal, String operator) { String field = id.getField(); Object value = literal.getValue(); if (field == null) { if (value instanceof String) { String name = (String)value; if ((operator.equals("=") && name.contains("*")) || (operator.equals("=~") && ((name.contains("*") || name.contains("?"))))) { return null; } return new BucketSet(factory.getBucketId(new DocumentId(name))); } } else if (field.equalsIgnoreCase("user")) { if (value instanceof Long) { return new BucketSet(new BucketId(factory.getLocationBitCount(), (Long)value)); } } else if (field.equalsIgnoreCase("group")) { if (value instanceof String) { String name = (String)value; if ((operator.equals("=") && name.contains("*")) || (operator.equals("=~") && ((name.contains("*") || name.contains("?"))))) { return null; } return new BucketSet(new BucketId(factory.getLocationBitCount(), new GroupDocIdString("", name, "").getLocation())); } } else if (field.equalsIgnoreCase("bucket")) { if (value instanceof Long) { return new 
BucketSet(new BucketId((Long)value)); } } return null; } public Object evaluate(Context context) { Object oLeft = lhs.evaluate(context); Object oRight = rhs.evaluate(context); if (oLeft == null || oRight == null) { return evaluateWithAtLeastOneNullSide(oLeft, oRight); } if (oLeft == Result.INVALID || oRight == Result.INVALID) { return new ResultList(Result.INVALID); } if (oLeft instanceof AttributeNode.VariableValueList && oRight instanceof AttributeNode.VariableValueList) { if (operator.equals("==")) { return evaluateListsTrue((AttributeNode.VariableValueList)oLeft, (AttributeNode.VariableValueList)oRight); } else if (operator.equals("!=")) { return evaluateListsFalse((AttributeNode.VariableValueList)oLeft, (AttributeNode.VariableValueList)oRight); } else { return new ResultList(Result.INVALID); } } else if (oLeft instanceof AttributeNode.VariableValueList) { return evaluateListAndSingle((AttributeNode.VariableValueList)oLeft, oRight); } else if (oRight instanceof AttributeNode.VariableValueList) { return evaluateListAndSingle((AttributeNode.VariableValueList)oRight, oLeft); } return new ResultList(evaluateBool(oLeft, oRight)); } /** * Evaluates a binary comparison where one or both operands are null. * Boolean outcomes are only defined for (in)equality relations, all others * return Result.INVALID. * * Precondition: lhs AND/OR rhs is null. 
*/ public ResultList evaluateListsTrue(AttributeNode.VariableValueList lhs, AttributeNode.VariableValueList rhs) { if (lhs.size() != rhs.size()) { return new ResultList(Result.FALSE); } for (int i = 0; i < lhs.size(); i++) { if (!lhs.get(i).getVariables().equals(rhs.get(i).getVariables())) { return new ResultList(Result.FALSE); } if (evaluateEquals(lhs.get(i).getValue(), rhs.get(i).getValue()) == Result.FALSE) { return new ResultList(Result.FALSE); } } return new ResultList(Result.TRUE); } public ResultList evaluateListsFalse(AttributeNode.VariableValueList lhs, AttributeNode.VariableValueList rhs) { ResultList lst = evaluateListsTrue(lhs, rhs); if (lst.toResult() == Result.TRUE) { return new ResultList(Result.FALSE); } else if (lst.toResult() == Result.FALSE) { return new ResultList(Result.TRUE); } else { return lst; } } public ResultList evaluateListAndSingle(AttributeNode.VariableValueList lhs, Object rhs) { if (rhs == null && lhs == null) { return new ResultList(Result.TRUE); } if (rhs == null || lhs == null) { return new ResultList(Result.FALSE); } ResultList retVal = new ResultList(); for (int i = 0; i < lhs.size(); i++) { Result result = evaluateBool(lhs.get(i).getValue(), rhs); retVal.add((FieldPathIteratorHandler.VariableMap)lhs.get(i).getVariables().clone(), result); } return retVal; } /** * Evaluate this expression on two operands, given that they are not invalid. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return The evaluation result. 
*/ private Result evaluateBool(Object lhs, Object rhs) { if (operator.equals("==")) { return evaluateEquals(lhs, rhs); } else if (operator.equals("!=")) { return Result.invert(evaluateEquals(lhs, rhs)); } else if (operator.equals("<") || operator.equals("<=") || operator.equals(">") || operator.equals(">=")) { return evaluateNumber(lhs, rhs); } else if (operator.equals("=~") || operator.equals("=")) { return evaluateString(lhs, rhs); } throw new IllegalStateException("Comparison operator '" + operator + "' is not supported."); } /** * Compare two operands for equality. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return Wether or not the two operands are equal. */ private Result evaluateEquals(Object lhs, Object rhs) { if (lhs == null || rhs == null) { return Result.toResult(lhs == rhs); } double a = getAsNumber(lhs); double b = getAsNumber(rhs); if (Double.isNaN(a) || Double.isNaN(b)) { return Result.toResult(lhs.toString().equals(rhs.toString())); } return Result.toResult(a == b); } private double getAsNumber(Object value) { if (value instanceof Number) { return ((Number)value).doubleValue(); } else if (value instanceof NumericFieldValue) { return getAsNumber(((NumericFieldValue)value).getNumber()); } else { return Double.NaN; } } /** * Evalutes the value of this term over a document, given that both operands must be numbers. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return The evaluation result. 
*/ private Result evaluateNumber(Object lhs, Object rhs) { double a = getAsNumber(lhs); double b = getAsNumber(rhs); if (Double.isNaN(a) || Double.isNaN(b)) { return Result.INVALID; } if (operator.equals("<")) { return Result.toResult(a < b); } else if (operator.equals("<=")) { return Result.toResult(a <= b); } else if (operator.equals(">")) { return Result.toResult(a > b); } else { return Result.toResult(a >= b); } } /** * Evalutes the value of this term over a document, given that both operands must be strings. * * @param lhs Left hand side of operation. * @param rhs Right hand side of operation. * @return The evaluation result. */ private Result evaluateString(Object lhs, Object rhs) { String left = "" + lhs; String right = "" + rhs; if (operator.equals("=~")) { return Result.toResult(Pattern.compile(right).matcher(left).find()); } else { return Result.toResult(Pattern.compile(globToRegex(right)).matcher(left).find()); } } /** * Converts a glob pattern to a corresponding regular expression string. * * @param glob The glob pattern. * @return The regex string. */ private String globToRegex(String glob) { StringBuilder ret = new StringBuilder(); ret.append("^"); for (int i = 0; i < glob.length(); i++) { ret.append(globToRegex(glob.charAt(i))); } ret.append("$"); return ret.toString(); } /** * Converts a single character in a glob expression to the corresponding regular expression string. * * @param glob The glob character. * @return The regex string. */ private String globToRegex(char glob) { switch (glob) { case'*': return ".*"; case'?': return "."; case'^': case'$': case'|': case'{': case'}': case'(': case')': case'[': case']': case'\\': case'+': case'.': return "\\" + glob; default: return "" + glob; } } public void accept(Visitor visitor) { visitor.visit(this); } @Override public String toString() { return lhs + " " + operator + " " + rhs; } }
Use scheduleWithFixedDelay instead of scheduleAtFixedRate to avoid potential queueing of tasks?
/**
 * Creates the per-tenant application repository rooted at the given ZooKeeper path,
 * starts a directory cache watching it for added/removed applications, and schedules
 * a periodic task that removes stale applications.
 *
 * @param curator          The curator instance to use.
 * @param applicationsPath The ZooKeeper path under which applications for this tenant live.
 * @param reloadHandler    Handler notified about application changes.
 * @param tenant           The tenant this repository belongs to.
 */
private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) {
    this.curator = curator;
    this.applicationsPath = applicationsPath;
    // Ensure the applications node exists before we start watching it.
    curator.create(applicationsPath);
    this.reloadHandler = reloadHandler;
    this.tenant = tenant;
    this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor);
    this.directoryCache.start();
    this.directoryCache.addListener(this);
    // Fixed delay (not fixed rate): a slow removeApplications run delays the next
    // execution instead of letting executions queue up behind it.
    checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeApplications,
                                                              checkForRemovedApplicationsInterval.getSeconds(),
                                                              checkForRemovedApplicationsInterval.getSeconds(),
                                                              TimeUnit.SECONDS);
}
checkForRemovedApplicationsService.scheduleAtFixedRate(this::removeApplications, checkForRemovedApplicationsInterval.getSeconds(),
/**
 * Creates the per-tenant application repository rooted at the given ZooKeeper path,
 * starts a directory cache watching it for added/removed applications, and schedules
 * a periodic task that removes stale applications.
 *
 * @param curator          The curator instance to use.
 * @param applicationsPath The ZooKeeper path under which applications for this tenant live.
 * @param reloadHandler    Handler notified about application changes.
 * @param tenant           The tenant this repository belongs to.
 */
private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) {
    this.curator = curator;
    this.applicationsPath = applicationsPath;
    // Ensure the applications node exists before we start watching it.
    curator.create(applicationsPath);
    this.reloadHandler = reloadHandler;
    this.tenant = tenant;
    this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor);
    this.directoryCache.start();
    this.directoryCache.addListener(this);
    // Fixed delay (not fixed rate) so a slow run cannot cause executions to queue up.
    checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeApplications,
                                                              checkForRemovedApplicationsInterval.getSeconds(),
                                                              checkForRemovedApplicationsInterval.getSeconds(),
                                                              TimeUnit.SECONDS);
}
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return 
Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void 
applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } private void removeApplications() { ImmutableSet<ApplicationId> activeApplications = ImmutableSet.copyOf(listApplications()); log.log(LogLevel.INFO, "Removing stale applications for tenant '" + tenant + "', not removing these active applications: " + activeApplications); reloadHandler.removeApplicationsExcept(activeApplications); } }
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return 
Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void 
applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } private void removeApplications() { ImmutableSet<ApplicationId> activeApplications = ImmutableSet.copyOf(listApplications()); log.log(LogLevel.DEBUG, "Removing stale applications for tenant '" + tenant + "', not removing these active applications: " + activeApplications); reloadHandler.removeApplicationsExcept(activeApplications); } }
This may become too noisy? How about logging at INFO in removeApplicationsExcept for those applications that are actually removed by this?
/** Removes applications from the reload handler that are no longer present in ZooKeeper. */
private void removeApplications() {
    ImmutableSet<ApplicationId> activeApplications = ImmutableSet.copyOf(listApplications());
    // DEBUG, not INFO: this runs every minute for every tenant and would be too noisy
    // at INFO; removeApplicationsExcept logs the applications it actually removes.
    log.log(LogLevel.DEBUG, "Removing stale applications for tenant '" + tenant +
            "', not removing these active applications: " + activeApplications);
    reloadHandler.removeApplicationsExcept(activeApplications);
}
log.log(LogLevel.INFO, "Removing stale applications for tenant '" + tenant +
/** Asks the reload handler to drop every application no longer present in ZooKeeper. */
private void removeApplications() {
    ImmutableSet<ApplicationId> applicationsToKeep = ImmutableSet.copyOf(listApplications());
    log.log(LogLevel.DEBUG, "Removing stale applications for tenant '" + tenant +
            "', not removing these active applications: " + applicationsToKeep);
    reloadHandler.removeApplicationsExcept(applicationsToKeep);
}
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) { this.curator = curator; this.applicationsPath = applicationsPath; curator.create(applicationsPath); this.reloadHandler = reloadHandler; this.tenant = tenant; this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor); this.directoryCache.start(); this.directoryCache.addListener(this); checkForRemovedApplicationsService.scheduleAtFixedRate(this::removeApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new 
IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, 
PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } }
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) { this.curator = curator; this.applicationsPath = applicationsPath; curator.create(applicationsPath); this.reloadHandler = reloadHandler; this.tenant = tenant; this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor); this.directoryCache.start(); this.directoryCache.addListener(this); checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new 
IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, 
PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } }
There is logging in removeApplicationsExcept already, so yes, this should be lowered to DEBUG.
/** Removes applications no longer present in ZooKeeper from the reload handler. */
private void removeApplications() {
    // Snapshot of the applications currently registered under this tenant in ZooKeeper.
    ImmutableSet<ApplicationId> activeApplications = ImmutableSet.copyOf(listApplications());
    // DEBUG, not INFO: this is invoked repeatedly (from childEvent and a periodic task),
    // and removeApplicationsExcept already logs the applications it actually removes.
    log.log(LogLevel.DEBUG, "Removing stale applications for tenant '" + tenant +
            "', not removing these active applications: " + activeApplications);
    reloadHandler.removeApplicationsExcept(activeApplications);
}
log.log(LogLevel.INFO, "Removing stale applications for tenant '" + tenant +
/** Drops every application not currently registered in ZooKeeper from the reload handler. */
private void removeApplications() {
    // The applications still present in ZooKeeper are the ones to keep.
    ImmutableSet<ApplicationId> applicationsToKeep = ImmutableSet.copyOf(listApplications());
    log.log(LogLevel.DEBUG, "Removing stale applications for tenant '" + tenant +
            "', not removing these active applications: " + applicationsToKeep);
    reloadHandler.removeApplicationsExcept(applicationsToKeep);
}
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) { this.curator = curator; this.applicationsPath = applicationsPath; curator.create(applicationsPath); this.reloadHandler = reloadHandler; this.tenant = tenant; this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor); this.directoryCache.start(); this.directoryCache.addListener(this); checkForRemovedApplicationsService.scheduleAtFixedRate(this::removeApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new 
IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, 
PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } }
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) { this.curator = curator; this.applicationsPath = applicationsPath; curator.create(applicationsPath); this.reloadHandler = reloadHandler; this.tenant = tenant; this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor); this.directoryCache.start(); this.directoryCache.addListener(this); checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new 
IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, 
PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } }
Yeah, good point — will do.
/**
 * Sets up this repository: ensures the applications path exists, starts watching it for
 * child changes, and schedules the recurring sweep for removed applications.
 */
private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) {
    this.curator = curator;
    this.applicationsPath = applicationsPath;
    curator.create(applicationsPath);
    this.reloadHandler = reloadHandler;
    this.tenant = tenant;
    this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor);
    this.directoryCache.start();
    this.directoryCache.addListener(this);
    // scheduleWithFixedDelay, not scheduleAtFixedRate: if a sweep ever takes longer than the
    // interval, fixed-rate scheduling queues up immediate back-to-back runs, while fixed-delay
    // always leaves a full interval between the end of one run and the start of the next.
    checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeApplications,
                                                              checkForRemovedApplicationsInterval.getSeconds(),
                                                              checkForRemovedApplicationsInterval.getSeconds(),
                                                              TimeUnit.SECONDS);
}
checkForRemovedApplicationsService.scheduleAtFixedRate(this::removeApplications, checkForRemovedApplicationsInterval.getSeconds(),
private ZKTenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) { this.curator = curator; this.applicationsPath = applicationsPath; curator.create(applicationsPath); this.reloadHandler = reloadHandler; this.tenant = tenant; this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor); this.directoryCache.start(); this.directoryCache.addListener(this); checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); }
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return 
Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void 
applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } private void removeApplications() { ImmutableSet<ApplicationId> activeApplications = ImmutableSet.copyOf(listApplications()); log.log(LogLevel.INFO, "Removing stale applications for tenant '" + tenant + "', not removing these active applications: " + activeApplications); reloadHandler.removeApplicationsExcept(activeApplications); } }
class ZKTenantApplications implements TenantApplications, PathChildrenCacheListener { private static final Logger log = Logger.getLogger(ZKTenantApplications.class.getName()); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private final Curator curator; private final Path applicationsPath; private final ExecutorService pathChildrenExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(ZKTenantApplications.class.getName())); private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1); private final Curator.DirectoryCache directoryCache; private final ReloadHandler reloadHandler; private final TenantName tenant; public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) { try { return new ZKTenantApplications(curator, Tenants.getApplicationsPath(tenant), reloadHandler, tenant); } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant) + "Error creating application repo", e); } } private long readSessionId(ApplicationId appId, String appNode) { String path = applicationsPath.append(appNode).getAbsolute(); try { return Long.parseLong(Utf8.toString(curator.framework().getData().forPath(path))); } catch (Exception e) { throw new IllegalArgumentException(Tenants.logPre(appId) + "Unable to read the session id from '" + path + "'", e); } } @Override public List<ApplicationId> listApplications() { try { List<String> appNodes = curator.framework().getChildren().forPath(applicationsPath.getAbsolute()); List<ApplicationId> applicationIds = new ArrayList<>(); for (String appNode : appNodes) { parseApplication(appNode).ifPresent(applicationIds::add); } return applicationIds; } catch (Exception e) { throw new RuntimeException(Tenants.logPre(tenant)+"Unable to list applications", e); } } private Optional<ApplicationId> parseApplication(String appNode) { try { return 
Optional.of(ApplicationId.fromSerializedForm(appNode)); } catch (IllegalArgumentException e) { log.log(LogLevel.INFO, Tenants.logPre(tenant)+"Unable to parse application with id '" + appNode + "', ignoring."); return Optional.empty(); } } @Override public Transaction createPutApplicationTransaction(ApplicationId applicationId, long sessionId) { if (listApplications().contains(applicationId)) { return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } else { return new CuratorTransaction(curator).add(CuratorOperations.create(applicationsPath.append(applicationId.serializedForm()).getAbsolute(), Utf8.toAsciiBytes(sessionId))); } } @Override public long getSessionIdForApplication(ApplicationId applicationId) { return readSessionId(applicationId, applicationId.serializedForm()); } @Override public CuratorTransaction deleteApplication(ApplicationId applicationId) { Path path = applicationsPath.append(applicationId.serializedForm()); return CuratorTransaction.from(CuratorOperations.delete(path.getAbsolute()), curator); } @Override public void close() { directoryCache.close(); pathChildrenExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); } @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { switch (event.getType()) { case CHILD_ADDED: applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_REMOVED: applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName())); break; case CHILD_UPDATED: break; default: break; } removeApplications(); } private void applicationRemoved(ApplicationId applicationId) { reloadHandler.removeApplication(applicationId); log.log(LogLevel.INFO, Tenants.logPre(applicationId) + "Application removed: " + applicationId); } private void 
applicationAdded(ApplicationId applicationId) { log.log(LogLevel.DEBUG, Tenants.logPre(applicationId) + "Application added: " + applicationId); } private void removeApplications() { ImmutableSet<ApplicationId> activeApplications = ImmutableSet.copyOf(listApplications()); log.log(LogLevel.DEBUG, "Removing stale applications for tenant '" + tenant + "', not removing these active applications: " + activeApplications); reloadHandler.removeApplicationsExcept(activeApplications); } }
New test. The other changes just replace method overrides with the builder.
/**
 * End-to-end walk through the application/v4 REST API: tenant CRUD (Athenz and OpsDb
 * tenants), application CRUD, deployments through the pipeline environments, inspection
 * endpoints (logs, convergence, services), deactivation/deletion, and global-rotation
 * overrides. Responses are compared against the JSON files in {@code responseFiles}.
 * NOTE: the request order matters — later requests depend on state set up by earlier ones.
 */
public void testApplicationApi() throws Exception { ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles); ContainerTester tester = controllerTester.containerTester(); tester.updateSystemVersion(); addTenantAthenzDomain(athenzUserDomain, "mytenant"); tester.assertResponse(request("/application/v4/", GET), new File("root.json")); tester.assertResponse(request("/application/v4/athensDomain/", GET), new File("athensDomain-list.json")); tester.assertResponse(request("/application/v4/property/", GET), new File("property-list.json")); tester.assertResponse(request("/application/v4/cookiefreshness/", GET), new File("cookiefreshness.json")); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1", PUT) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/user", GET), new File("user.json")); tester.assertResponse(request("/application/v4/tenant/", GET), new File("tenant-list.json")); addTenantAthenzDomain("domain2", "mytenant"); addPropertyData((MockOrganization) controllerTester.controller().organization(), "1234"); tester.assertResponse(request("/application/v4/tenant/tenant2", POST) .data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"), new File("tenant-without-applications-with-id.json")); tester.assertResponse(request("/application/v4/tenant/tenant2", PUT) .data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"), new File("tenant-without-applications-with-id.json")); tester.assertResponse(request("/application/v4/tenant/tenant2", GET), new File("tenant-without-applications-with-id.json")); tester.assertResponse(request("/application/v4/tenant/tenant3", 
POST) .data("{\"userGroup\":\"group1\",\"property\":\"property1\",\"propertyId\":\"1234\"}"), new File("opsdb-tenant-with-id-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant3", PUT) .data("{\"userGroup\":\"group1\",\"property\":\"property2\",\"propertyId\":\"4321\"}"), new File("opsdb-tenant-with-new-id-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), new File("application-reference.json")); tester.assertResponse(request("/application/v4/tenant/tenant1", GET), new File("tenant-with-application.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET), new File("application-list.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", POST) .data("6.1.0"), new File("application-deployment.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", DELETE), new File("application-deployment-cancelled.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", DELETE), new File("application-deployment-cancelled-no-op.json")); HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty()); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-result.json")); ApplicationId id = ApplicationId.from("tenant1", "application1", "default"); long screwdriverProjectId = 123; addScrewdriverUserToDomain("screwdriveruser1", "domain1"); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", POST) .data("6.1.0"), new File("application-deployment.json")); 
// Deploy through the pipeline environments (test -> staging -> prod), notifying job completion between steps.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST) .data(createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId))) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default", DELETE), "Deactivated tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default"); controllerTester.notifyJobCompletion(id, screwdriverProjectId, true, DeploymentJobs.JobType.systemTest); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default/", POST) .data(createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId))) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default", DELETE), "Deactivated tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default"); controllerTester.notifyJobCompletion(id, screwdriverProjectId, true, DeploymentJobs.JobType.stagingTest); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/", POST) .data(createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId))) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); controllerTester.notifyJobCompletion(id, screwdriverProjectId, false, DeploymentJobs.JobType.productionCorpUsEast1); tester.assertResponse(request("/application/v4/tenant-pipeline/", GET), new File("tenant-pipelines.json")); 
// Inspect application and deployment resources, including recursive listings and restart/log/convergence endpoints.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET), new File("application.json")); setDeploymentMaintainedInfo(controllerTester); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default", GET), new File("deployment.json")); tester.assertResponse(request("/application/v4/", GET) .domain("domain1").user("mytenant") .recursive("deployment"), new File("recursive-root.json")); tester.assertResponse(request("/application/v4/", GET) .domain("domain1").user("mytenant") .recursive("tenant"), new File("recursive-until-tenant-root.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/", GET) .domain("domain1").user("mytenant") .recursive("true"), new File("tenant1-recursive.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/", GET) .domain("domain1").user("mytenant") .recursive("true"), new File("application1.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/restart", POST), "Requested restart of tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default"); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/restart?hostname=host1", POST), "Requested restart of tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default"); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/log", POST), new File("log-response.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/converge", GET), new File("convergence.json")); 
// Service listing/state, then tear-down of deployments, the application, and the tenant; note repeated DELETE is idempotent here.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/service", GET), new File("services.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET), new File("service.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default", DELETE), "Deactivated tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default"); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default", DELETE), "Deactivated tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default"); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default", DELETE), "Deactivated tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default"); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE), ""); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE), new File("tenant-without-applications.json")); byte[] data = new byte[0]; tester.assertResponse(request("/application/v4/user?user=newuser&domain=by", PUT) .data(data) .domain(athenzUserDomain).user("newuser"), new File("create-user-response.json")); tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS), ""); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation", GET), new File("global-rotation.json")); 
// Global-rotation override GET/PUT/DELETE, then environment promotion endpoints; finally shut down the controller.
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation/override", GET), new File("global-rotation-get.json")); tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation/override", PUT) .data("{\"reason\":\"because i can\"}"), new File("global-rotation-put.json")); tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation/override", DELETE) .data("{\"reason\":\"because i can\"}"), new File("global-rotation-delete.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/promote", POST), "{\"message\":\"Successfully copied environment hosted-verified-prod to hosted-instance_tenant1_application1_placeholder_component_default\"}"); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/promote", POST), "{\"message\":\"Successfully copied environment hosted-instance_tenant1_application1_placeholder_component_default to hosted-instance_tenant1_application1_us-west-1_prod_default\"}"); controllerTester.controller().deconstruct(); }
new File("recursive-until-tenant-root.json"));
/**
 * End-to-end walk through the application/v4 REST API against canned JSON responses:
 * tenant CRUD, application creation, a full deployment pipeline (dev, test, staging,
 * prod), recursive listings, operational endpoints (restart, log, converge, service),
 * teardown, user creation, and global rotation. Statement order matters: later
 * assertions depend on state built up by earlier requests.
 */
public void testApplicationApi() throws Exception {
    ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles);
    ContainerTester tester = controllerTester.containerTester();
    tester.updateSystemVersion(); // NOTE(review): presumably publishes a system version so deploys can resolve one — confirm
    addTenantAthenzDomain(athenzUserDomain, "mytenant"); // register the Athenz domain in the mock so tenant creation is allowed

    // Root and top-level listing resources.
    tester.assertResponse(request("/application/v4/", GET),
                          new File("root.json"));
    tester.assertResponse(request("/application/v4/athensDomain/", GET),
                          new File("athensDomain-list.json"));
    tester.assertResponse(request("/application/v4/property/", GET),
                          new File("property-list.json"));
    tester.assertResponse(request("/application/v4/cookiefreshness/", GET),
                          new File("cookiefreshness.json"));

    // Create and update an Athenz tenant; both POST and PUT return the tenant view.
    tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          new File("tenant-without-applications.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
                                  .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
                          new File("tenant-without-applications.json"));
    tester.assertResponse(request("/application/v4/user", GET),
                          new File("user.json"));
    tester.assertResponse(request("/application/v4/tenant/", GET),
                          new File("tenant-list.json"));

    // A second tenant carrying an explicit propertyId.
    addTenantAthenzDomain("domain2", "mytenant");
    addPropertyData((MockOrganization) controllerTester.controller().organization(), "1234");
    tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
                                  .data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
                          new File("tenant-without-applications-with-id.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
                                  .data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
                          new File("tenant-without-applications-with-id.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant2", GET),
                          new File("tenant-without-applications-with-id.json"));

    // An OpsDb (user-group) tenant; PUT changes its propertyId.
    tester.assertResponse(request("/application/v4/tenant/tenant3", POST)
                                  .data("{\"userGroup\":\"group1\",\"property\":\"property1\",\"propertyId\":\"1234\"}"),
                          new File("opsdb-tenant-with-id-without-applications.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant3", PUT)
                                  .data("{\"userGroup\":\"group1\",\"property\":\"property2\",\"propertyId\":\"4321\"}"),
                          new File("opsdb-tenant-with-new-id-without-applications.json"));

    // Create an application under tenant1 and list it.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST),
                          new File("application-reference.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1", GET),
                          new File("tenant-with-application.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET),
                          new File("application-list.json"));

    // Trigger a version change, then cancel it; a second cancel is a no-op.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", POST)
                                  .data("6.1.0"),
                          new File("application-deployment.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", DELETE),
                          new File("application-deployment-cancelled.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", DELETE),
                          new File("application-deployment-cancelled-no-op.json"));

    // Dev deployments are done directly by the (athens) user.
    HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty());
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST)
                                  .data(entity)
                                  .domain(athenzUserDomain).user("mytenant"),
                          new File("deploy-result.json"));

    // Pipeline deployments are done by the Screwdriver principal, per job type.
    ApplicationId id = ApplicationId.from("tenant1", "application1", "default");
    long screwdriverProjectId = 123;
    addScrewdriverUserToDomain("screwdriveruser1", "domain1");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", POST)
                                  .data("6.1.0"),
                          new File("application-deployment.json"));

    // system test
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
                                  .data(createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId)))
                                  .domain(athenzScrewdriverDomain).user("screwdriveruser1"),
                          new File("deploy-result.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default", DELETE),
                          "Deactivated tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default");
    controllerTester.notifyJobCompletion(id, screwdriverProjectId, true, DeploymentJobs.JobType.systemTest);

    // staging test
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default/", POST)
                                  .data(createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId)))
                                  .domain(athenzScrewdriverDomain).user("screwdriveruser1"),
                          new File("deploy-result.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default", DELETE),
                          "Deactivated tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default");
    controllerTester.notifyJobCompletion(id, screwdriverProjectId, true, DeploymentJobs.JobType.stagingTest);

    // prod deployment; the job is reported as failed (false) afterwards.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/", POST)
                                  .data(createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId)))
                                  .domain(athenzScrewdriverDomain).user("screwdriveruser1"),
                          new File("deploy-result.json"));
    controllerTester.notifyJobCompletion(id, screwdriverProjectId, false, DeploymentJobs.JobType.productionCorpUsEast1);

    tester.assertResponse(request("/application/v4/tenant-pipeline/", GET),
                          new File("tenant-pipelines.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET),
                          new File("application.json"));

    // Populate the data normally filled in asynchronously by maintainers, then read it back.
    setDeploymentMaintainedInfo(controllerTester);
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default", GET),
                          new File("deployment.json"));

    // Recursive listings at various depths.
    tester.assertResponse(request("/application/v4/", GET)
                                  .domain("domain1").user("mytenant")
                                  .recursive("deployment"),
                          new File("recursive-root.json"));
    tester.assertResponse(request("/application/v4/", GET)
                                  .domain("domain1").user("mytenant")
                                  .recursive("tenant"),
                          new File("recursive-until-tenant-root.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
                                  .domain("domain1").user("mytenant")
                                  .recursive("true"),
                          new File("tenant1-recursive.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/", GET)
                                  .domain("domain1").user("mytenant")
                                  .recursive("true"),
                          new File("application1.json"));

    // Operational endpoints on the prod deployment.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/restart", POST),
                          "Requested restart of tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/restart?hostname=host1", POST),
                          "Requested restart of tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/log", POST),
                          new File("log-response.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/converge", GET),
                          new File("convergence.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/service", GET),
                          new File("services.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET),
                          new File("service.json"));

    // Teardown: deactivate deployments (prod twice — the second is a no-op), delete application and tenant.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default", DELETE),
                          "Deactivated tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default", DELETE),
                          "Deactivated tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default", DELETE),
                          "Deactivated tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE),
                          "");
    tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE),
                          new File("tenant-without-applications.json"));

    // Create a user tenant (empty PUT body) and probe CORS preflight.
    byte[] data = new byte[0];
    tester.assertResponse(request("/application/v4/user?user=newuser&domain=by", PUT)
                                  .data(data)
                                  .domain(athenzUserDomain).user("newuser"),
                          new File("create-user-response.json"));
    tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS),
                          "");

    // Global rotation status and overrides (override state lives on tenant2's application).
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation", GET),
                          new File("global-rotation.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation/override", GET),
                          new File("global-rotation-get.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation/override", PUT)
                                  .data("{\"reason\":\"because i can\"}"),
                          new File("global-rotation-put.json"));
    tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation/override", DELETE)
                                  .data("{\"reason\":\"because i can\"}"),
                          new File("global-rotation-delete.json"));

    // Chef environment promotion, at application and at deployment level.
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/promote", POST),
                          "{\"message\":\"Successfully copied environment hosted-verified-prod to hosted-instance_tenant1_application1_placeholder_component_default\"}");
    tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/promote", POST),
                          "{\"message\":\"Successfully copied environment hosted-instance_tenant1_application1_placeholder_component_default to hosted-instance_tenant1_application1_us-west-1_prod_default\"}");

    controllerTester.controller().deconstruct();
}
class ApplicationApiTest extends ControllerContainerTest { private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/"; private static final ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .build(); private static final String athenzUserDomain = "domain1"; private static final String athenzScrewdriverDomain = AthenzUtils.SCREWDRIVER_DOMAIN.id(); @Test @Test public void testDeployDirectly() throws Exception { ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles); ContainerTester tester = controllerTester.containerTester(); tester.updateSystemVersion(); addTenantAthenzDomain(athenzUserDomain, "mytenant"); addScrewdriverUserToDomain("screwdriveruser1", "domain1"); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), new File("application-reference.json")); HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty()); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/deploy", POST) .data(entity) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); } @Test public void testSortsDeploymentsAndJobs() throws Exception { ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles); ContainerTester tester = controllerTester.containerTester(); tester.updateSystemVersion(); addTenantAthenzDomain(athenzUserDomain, "mytenant"); addScrewdriverUserToDomain("screwdriveruser1", "domain1"); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) 
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), new File("application-reference.json")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); ApplicationId id = ApplicationId.from("tenant1", "application1", "default"); long projectId = 1; HttpEntity deployData = createApplicationDeployData(applicationPackage, Optional.of(projectId)); startAndTestChange(controllerTester, id, projectId, deployData); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default/deploy", POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsEast3); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); deployData = createApplicationDeployData(applicationPackage, Optional.of(projectId)); startAndTestChange(controllerTester, id, projectId, deployData); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsWest1); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default/deploy", POST) .data(deployData).domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsEast3); 
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET), new File("application-without-change-multiple-deployments.json")); } @Test public void testErrorResponses() throws Exception { ContainerTester tester = new ContainerTester(container, responseFiles); tester.updateSystemVersion(); addTenantAthenzDomain("domain1", "mytenant"); tester.assertResponse(request("/application/v4/tenant/tenant1", PUT) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1", GET), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET), "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET), "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant2", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}", 400); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}", 400); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), new 
File("application-reference.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"An application with id 'tenant1.application1' already exists\"}", 400); ConfigServerClientMock configServer = (ConfigServerClientMock)container.components().getComponent("com.yahoo.vespa.hosted.controller.ConfigServerClientMock"); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, null)); HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty()); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-failure.json"), 400); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.OUT_OF_CAPACITY, null)); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-out-of-capacity.json"), 400); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to activate application", ConfigServerException.ErrorCode.ACTIVATION_CONFLICT, null)); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-activation-conflict.json"), 409); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Internal server error", ConfigServerException.ErrorCode.INTERNAL_SERVER_ERROR, null)); 
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-internal-server-error.json"), 500); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}", 400); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE), ""); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete application 'tenant1.application1': Application not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete tenant 'tenant1': Tenant not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/dontexist/application/dontexist/environment/prod/region/us-west-1/instance/default/promote", POST), "{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"Unable to promote Chef environments for application\"}", 500); } @Test public void testAuthorization() throws Exception { ContainerTester tester = new ContainerTester(container, responseFiles); String authorizedUser = "mytenant"; String unauthorizedUser = "othertenant"; tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(null), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User is not authenticated\"}", 403); tester.assertResponse(request("/application/v4/tenant/", GET) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(null), "[]", 
200); addTenantAthenzDomain("domain1", "mytenant"); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'othertenant' is not admin in Athenz domain 'domain1'\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(authorizedUser), new File("tenant-without-applications.json"), 200); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST) .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST) .domain("domain1").user(authorizedUser), new File("application-reference.json"), 200); HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty()); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), "{\"error-code\":\"FORBIDDEN\",\"message\":\"Principal 'mytenant' is not a Screwdriver principal. 
Excepted principal with Athenz domain 'cd.screwdriver.project', got 'domain1'.\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE) .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE) .domain("domain1").user(authorizedUser), "", 200); tester.assertResponse(request("/application/v4/tenant/tenant1", PUT) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); addTenantAthenzDomain("domain2", "mytenant"); tester.assertResponse(request("/application/v4/tenant/tenant1", PUT) .data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}") .domain("domain1").user(authorizedUser), "{\"tenant\":\"tenant1\",\"type\":\"ATHENS\",\"athensDomain\":\"domain2\",\"property\":\"property1\",\"applications\":[]}", 200); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE) .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); } private HttpEntity createApplicationDeployData(ApplicationPackage applicationPackage, Optional<Long> screwdriverJobId) { MultipartEntityBuilder builder = MultipartEntityBuilder.create(); builder.addTextBody("deployOptions", deployOptions(screwdriverJobId), ContentType.APPLICATION_JSON); builder.addBinaryBody("applicationZip", applicationPackage.zippedContent()); return builder.build(); } private String deployOptions(Optional<Long> screwdriverJobId) { if (screwdriverJobId.isPresent()) return "{\"vespaVersion\":null," + "\"ignoreValidationErrors\":false," + "\"screwdriverBuildJob\":{\"screwdriverId\":\"" + 
screwdriverJobId.get() + "\"," + "\"gitRevision\":{\"repository\":\"repository1\"," + "\"branch\":\"master\"," + "\"commit\":\"commit1\"" + "}" + "}" + "}"; else return "{\"vespaVersion\":null," + "\"ignoreValidationErrors\":false," + "\"screwdriverBuildJob\":{\"screwdriverId\":null," + "\"gitRevision\":{\"repository\":null," + "\"branch\":null," + "\"commit\":null" + "}" + "}" + "}"; } private static class RequestBuilder implements Supplier<Request> { private final String path; private final Request.Method method; private byte[] data = new byte[0]; private String domain = "domain1"; private String user = "mytenant"; private String contentType = "application/json"; private String recursive; private RequestBuilder(String path, Request.Method method) { this.path = path; this.method = method; } private RequestBuilder data(byte[] data) { this.data = data; return this; } private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); } private RequestBuilder data(HttpEntity data) { ByteArrayOutputStream out = new ByteArrayOutputStream(); try { data.writeTo(out); } catch (IOException e) { throw new UncheckedIOException(e); } return data(out.toByteArray()).contentType(data.getContentType().getValue()); } private RequestBuilder domain(String domain) { this.domain = domain; return this; } private RequestBuilder user(String user) { this.user = user; return this; } private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; } private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; } @Override public Request get() { Request request = new Request("http: "?domain=" + domain + (user == null ? "" : "&user=" + user) + (recursive == null ? 
"" : "&recursive=" + recursive), data, method); request.getHeaders().put("Content-Type", contentType); return request; } } /** Make a request with (athens) user domain1.mytenant */ private RequestBuilder request(String path, Request.Method method) { return new RequestBuilder(path, method); } /** * In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the * mock setup to replicate the action. */ private AthenzDomain addTenantAthenzDomain(String domainName, String userName) { AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components() .getComponent(AthenzClientFactoryMock.class.getName()); AthenzDomain athensDomain = new AthenzDomain(domainName); AthenzDbMock.Domain domain = new AthenzDbMock.Domain(athensDomain); domain.markAsVespaTenant(); domain.admin(AthenzUtils.createPrincipal(new UserId(userName))); mock.getSetup().addDomain(domain); return athensDomain; } /** * In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the * mock setup to replicate the action. 
*/ private void addScrewdriverUserToDomain(String screwdriverUserId, String domainName) { AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components() .getComponent(AthenzClientFactoryMock.class.getName()); AthenzDbMock.Domain domain = mock.getSetup().domains.get(new AthenzDomain(domainName)); domain.admin(new AthenzPrincipal(new AthenzDomain(athenzScrewdriverDomain), new UserId(screwdriverUserId))); } private void startAndTestChange(ContainerControllerTester controllerTester, ApplicationId application, long projectId, HttpEntity deployData) throws IOException { ContainerTester tester = controllerTester.containerTester(); controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.component); String testPath = String.format("/application/v4/tenant/%s/application/%s/environment/test/region/us-east-1/instance/default", application.tenant().value(), application.application().value()); tester.assertResponse(request(testPath, POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); tester.assertResponse(request(testPath, DELETE), "Deactivated " + testPath.replaceFirst("/application/v4/", "")); controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.systemTest); String stagingPath = String.format("/application/v4/tenant/%s/application/%s/environment/staging/region/us-east-3/instance/default", application.tenant().value(), application.application().value()); tester.assertResponse(request(stagingPath, POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); tester.assertResponse(request(stagingPath, DELETE), "Deactivated " + stagingPath.replaceFirst("/application/v4/", "")); controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.stagingTest); } /** * Cluster info, utilization and deployment metrics are maintained async by maintainers. 
* * This sets these values as if the maintainers has been ran. * * @param controllerTester */ private void setDeploymentMaintainedInfo(ContainerControllerTester controllerTester) { for (Application application : controllerTester.controller().applications().asList()) { try (Lock lock = controllerTester.controller().applications().lock(application.id())) { LockedApplication lockedApplication = controllerTester.controller().applications() .require(application.id(), lock); for (Deployment deployment : application.deployments().values()) { Map<ClusterSpec.Id, ClusterInfo> clusterInfo = new HashMap<>(); List<String> hostnames = new ArrayList<>(); hostnames.add("host1"); hostnames.add("host2"); clusterInfo.put(ClusterSpec.Id.from("cluster1"), new ClusterInfo("flavor1", 37, 2, 4, 50, ClusterSpec.Type.content, hostnames)); Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>(); clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3)); DeploymentMetrics metrics = new DeploymentMetrics(1,2,3,4,5); controllerTester.controller().applications().store(lockedApplication .withClusterInfo(deployment.zone(), clusterInfo) .withClusterUtilization(deployment.zone(), clusterUtils) .with(deployment.zone(), metrics)); } } } } private void addPropertyData(MockOrganization organization, String propertyIdValue) { PropertyId propertyId = new PropertyId(propertyIdValue); organization.addProperty(propertyId); organization.setContactsFor(propertyId, Arrays.asList(Collections.singletonList(User.from("alice")), Collections.singletonList(User.from("bob")))); } }
class ApplicationApiTest extends ControllerContainerTest { private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/"; private static final ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .build(); private static final String athenzUserDomain = "domain1"; private static final String athenzScrewdriverDomain = AthenzUtils.SCREWDRIVER_DOMAIN.id(); @Test @Test public void testDeployDirectly() throws Exception { ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles); ContainerTester tester = controllerTester.containerTester(); tester.updateSystemVersion(); addTenantAthenzDomain(athenzUserDomain, "mytenant"); addScrewdriverUserToDomain("screwdriveruser1", "domain1"); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), new File("application-reference.json")); HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty()); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/deploy", POST) .data(entity) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); } @Test public void testSortsDeploymentsAndJobs() throws Exception { ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles); ContainerTester tester = controllerTester.containerTester(); tester.updateSystemVersion(); addTenantAthenzDomain(athenzUserDomain, "mytenant"); addScrewdriverUserToDomain("screwdriveruser1", "domain1"); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) 
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), new File("application-reference.json")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); ApplicationId id = ApplicationId.from("tenant1", "application1", "default"); long projectId = 1; HttpEntity deployData = createApplicationDeployData(applicationPackage, Optional.of(projectId)); startAndTestChange(controllerTester, id, projectId, deployData); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default/deploy", POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsEast3); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); deployData = createApplicationDeployData(applicationPackage, Optional.of(projectId)); startAndTestChange(controllerTester, id, projectId, deployData); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsWest1); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default/deploy", POST) .data(deployData).domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsEast3); 
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET), new File("application-without-change-multiple-deployments.json")); } @Test public void testErrorResponses() throws Exception { ContainerTester tester = new ContainerTester(container, responseFiles); tester.updateSystemVersion(); addTenantAthenzDomain("domain1", "mytenant"); tester.assertResponse(request("/application/v4/tenant/tenant1", PUT) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1", GET), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET), "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET), "{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant2", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}", 400); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}", 400); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), new 
File("application-reference.json")); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"An application with id 'tenant1.application1' already exists\"}", 400); ConfigServerClientMock configServer = (ConfigServerClientMock)container.components().getComponent("com.yahoo.vespa.hosted.controller.ConfigServerClientMock"); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, null)); HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty()); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-failure.json"), 400); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.OUT_OF_CAPACITY, null)); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-out-of-capacity.json"), 400); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to activate application", ConfigServerException.ErrorCode.ACTIVATION_CONFLICT, null)); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-activation-conflict.json"), 409); configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Internal server error", ConfigServerException.ErrorCode.INTERNAL_SERVER_ERROR, null)); 
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), new File("deploy-internal-server-error.json"), 500); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE), "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}", 400); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE), ""); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete application 'tenant1.application1': Application not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE), new File("tenant-without-applications.json")); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE), "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete tenant 'tenant1': Tenant not found\"}", 404); tester.assertResponse(request("/application/v4/tenant/dontexist/application/dontexist/environment/prod/region/us-west-1/instance/default/promote", POST), "{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"Unable to promote Chef environments for application\"}", 500); } @Test public void testAuthorization() throws Exception { ContainerTester tester = new ContainerTester(container, responseFiles); String authorizedUser = "mytenant"; String unauthorizedUser = "othertenant"; tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(null), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User is not authenticated\"}", 403); tester.assertResponse(request("/application/v4/tenant/", GET) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(null), "[]", 
200); addTenantAthenzDomain("domain1", "mytenant"); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'othertenant' is not admin in Athenz domain 'domain1'\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1", POST) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(authorizedUser), new File("tenant-without-applications.json"), 200); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST) .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST) .domain("domain1").user(authorizedUser), new File("application-reference.json"), 200); HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty()); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST) .data(entity) .domain(athenzUserDomain).user("mytenant"), "{\"error-code\":\"FORBIDDEN\",\"message\":\"Principal 'mytenant' is not a Screwdriver principal. 
Excepted principal with Athenz domain 'cd.screwdriver.project', got 'domain1'.\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE) .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE) .domain("domain1").user(authorizedUser), "", 200); tester.assertResponse(request("/application/v4/tenant/tenant1", PUT) .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}") .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); addTenantAthenzDomain("domain2", "mytenant"); tester.assertResponse(request("/application/v4/tenant/tenant1", PUT) .data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}") .domain("domain1").user(authorizedUser), "{\"tenant\":\"tenant1\",\"type\":\"ATHENS\",\"athensDomain\":\"domain2\",\"property\":\"property1\",\"applications\":[]}", 200); tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE) .domain("domain1").user(unauthorizedUser), "{\"error-code\":\"FORBIDDEN\",\"message\":\"User othertenant does not have write access to tenant tenant1\"}", 403); } private HttpEntity createApplicationDeployData(ApplicationPackage applicationPackage, Optional<Long> screwdriverJobId) { MultipartEntityBuilder builder = MultipartEntityBuilder.create(); builder.addTextBody("deployOptions", deployOptions(screwdriverJobId), ContentType.APPLICATION_JSON); builder.addBinaryBody("applicationZip", applicationPackage.zippedContent()); return builder.build(); } private String deployOptions(Optional<Long> screwdriverJobId) { if (screwdriverJobId.isPresent()) return "{\"vespaVersion\":null," + "\"ignoreValidationErrors\":false," + "\"screwdriverBuildJob\":{\"screwdriverId\":\"" + 
screwdriverJobId.get() + "\"," + "\"gitRevision\":{\"repository\":\"repository1\"," + "\"branch\":\"master\"," + "\"commit\":\"commit1\"" + "}" + "}" + "}"; else return "{\"vespaVersion\":null," + "\"ignoreValidationErrors\":false," + "\"screwdriverBuildJob\":{\"screwdriverId\":null," + "\"gitRevision\":{\"repository\":null," + "\"branch\":null," + "\"commit\":null" + "}" + "}" + "}"; } private static class RequestBuilder implements Supplier<Request> { private final String path; private final Request.Method method; private byte[] data = new byte[0]; private String domain = "domain1"; private String user = "mytenant"; private String contentType = "application/json"; private String recursive; private RequestBuilder(String path, Request.Method method) { this.path = path; this.method = method; } private RequestBuilder data(byte[] data) { this.data = data; return this; } private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); } private RequestBuilder data(HttpEntity data) { ByteArrayOutputStream out = new ByteArrayOutputStream(); try { data.writeTo(out); } catch (IOException e) { throw new UncheckedIOException(e); } return data(out.toByteArray()).contentType(data.getContentType().getValue()); } private RequestBuilder domain(String domain) { this.domain = domain; return this; } private RequestBuilder user(String user) { this.user = user; return this; } private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; } private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; } @Override public Request get() { Request request = new Request("http: "?domain=" + domain + (user == null ? "" : "&user=" + user) + (recursive == null ? 
"" : "&recursive=" + recursive), data, method); request.getHeaders().put("Content-Type", contentType); return request; } } /** Make a request with (athens) user domain1.mytenant */ private RequestBuilder request(String path, Request.Method method) { return new RequestBuilder(path, method); } /** * In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the * mock setup to replicate the action. */ private AthenzDomain addTenantAthenzDomain(String domainName, String userName) { AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components() .getComponent(AthenzClientFactoryMock.class.getName()); AthenzDomain athensDomain = new AthenzDomain(domainName); AthenzDbMock.Domain domain = new AthenzDbMock.Domain(athensDomain); domain.markAsVespaTenant(); domain.admin(AthenzUtils.createPrincipal(new UserId(userName))); mock.getSetup().addDomain(domain); return athensDomain; } /** * In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the * mock setup to replicate the action. 
*/ private void addScrewdriverUserToDomain(String screwdriverUserId, String domainName) { AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components() .getComponent(AthenzClientFactoryMock.class.getName()); AthenzDbMock.Domain domain = mock.getSetup().domains.get(new AthenzDomain(domainName)); domain.admin(new AthenzPrincipal(new AthenzDomain(athenzScrewdriverDomain), new UserId(screwdriverUserId))); } private void startAndTestChange(ContainerControllerTester controllerTester, ApplicationId application, long projectId, HttpEntity deployData) throws IOException { ContainerTester tester = controllerTester.containerTester(); controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.component); String testPath = String.format("/application/v4/tenant/%s/application/%s/environment/test/region/us-east-1/instance/default", application.tenant().value(), application.application().value()); tester.assertResponse(request(testPath, POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); tester.assertResponse(request(testPath, DELETE), "Deactivated " + testPath.replaceFirst("/application/v4/", "")); controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.systemTest); String stagingPath = String.format("/application/v4/tenant/%s/application/%s/environment/staging/region/us-east-3/instance/default", application.tenant().value(), application.application().value()); tester.assertResponse(request(stagingPath, POST) .data(deployData) .domain(athenzScrewdriverDomain).user("screwdriveruser1"), new File("deploy-result.json")); tester.assertResponse(request(stagingPath, DELETE), "Deactivated " + stagingPath.replaceFirst("/application/v4/", "")); controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.stagingTest); } /** * Cluster info, utilization and deployment metrics are maintained async by maintainers. 
* * This sets these values as if the maintainers has been ran. * * @param controllerTester */ private void setDeploymentMaintainedInfo(ContainerControllerTester controllerTester) { for (Application application : controllerTester.controller().applications().asList()) { try (Lock lock = controllerTester.controller().applications().lock(application.id())) { LockedApplication lockedApplication = controllerTester.controller().applications() .require(application.id(), lock); for (Deployment deployment : application.deployments().values()) { Map<ClusterSpec.Id, ClusterInfo> clusterInfo = new HashMap<>(); List<String> hostnames = new ArrayList<>(); hostnames.add("host1"); hostnames.add("host2"); clusterInfo.put(ClusterSpec.Id.from("cluster1"), new ClusterInfo("flavor1", 37, 2, 4, 50, ClusterSpec.Type.content, hostnames)); Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>(); clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3)); DeploymentMetrics metrics = new DeploymentMetrics(1,2,3,4,5); controllerTester.controller().applications().store(lockedApplication .withClusterInfo(deployment.zone(), clusterInfo) .withClusterUtilization(deployment.zone(), clusterUtils) .with(deployment.zone(), metrics)); } } } } private void addPropertyData(MockOrganization organization, String propertyIdValue) { PropertyId propertyId = new PropertyId(propertyIdValue); organization.addProperty(propertyId); organization.setContactsFor(propertyId, Arrays.asList(Collections.singletonList(User.from("alice")), Collections.singletonList(User.from("bob")))); } }
Suggestion: replace the manual lookup loop with a stream, e.g. `Stream.of(UserAuthenticationResult.values()).filter(r -> r.statusCode == statusCode).findAny()`?
/**
 * Reads the authentication outcome that the upstream user-authentication filter
 * recorded on the request, and maps its integer status code to the enum.
 *
 * @throws IllegalStateException if the pass-thru attribute is absent or carries an unknown code
 */
private UserAuthenticationResult fromHttpRequest(DiscFilterRequest request) {
    // The upstream filter must always run first and record its verdict.
    if ( ! request.containsAttribute(userAuthenticationPassThruAttribute))
        throw new IllegalStateException("User authentication filter passthru attribute missing");

    Integer statusCode = (Integer) request.getAttribute(userAuthenticationPassThruAttribute);
    UserAuthenticationResult[] candidates = UserAuthenticationResult.values();
    for (int i = 0; i < candidates.length; i++) {
        if (candidates[i].statusCode == statusCode)
            return candidates[i];
    }
    throw new IllegalStateException("Invalid status code: " + statusCode);
}
for (UserAuthenticationResult result : UserAuthenticationResult.values()) {
/**
 * Translates the status code left on the request by the upstream user-authentication
 * filter into a {@link UserAuthenticationResult}.
 *
 * @throws IllegalStateException if the pass-thru attribute is absent or carries an unknown code
 */
private UserAuthenticationResult fromHttpRequest(DiscFilterRequest request) {
    if (!request.containsAttribute(userAuthenticationPassThruAttribute)) {
        throw new IllegalStateException("User authentication filter passthru attribute missing");
    }
    Integer statusCode = (Integer) request.getAttribute(userAuthenticationPassThruAttribute);
    // Linear scan over the (tiny, fixed) enum is sufficient here.
    for (UserAuthenticationResult candidate : UserAuthenticationResult.values()) {
        if (candidate.statusCode == statusCode) {
            return candidate;
        }
    }
    throw new IllegalStateException("Invalid status code: " + statusCode);
}
/**
 * Request filter that first honors the verdict of an upstream user-cookie
 * authentication filter and only falls back to Athenz principal authentication
 * (the superclass) when no usable user cookie was presented.
 */
class UserAuthWithAthenzPrincipalFilter extends AthenzPrincipalFilter {

    // Name of the request attribute under which the upstream filter stores its integer verdict.
    private final String userAuthenticationPassThruAttribute;

    @Inject
    public UserAuthWithAthenzPrincipalFilter(ZmsKeystore zmsKeystore, Executor executor, AthenzConfig config) {
        super(zmsKeystore, executor, config);
        this.userAuthenticationPassThruAttribute = config.userAuthenticationPassThruAttribute();
    }

    @Override
    public void filter(DiscFilterRequest request, ResponseHandler responseHandler) {
        // CORS preflight requests carry no credentials; let them pass untouched.
        if (request.getMethod().equals("OPTIONS")) return;
        // fromHttpRequest(request) is defined elsewhere in this class (outside this chunk);
        // it maps the pass-thru attribute to a UserAuthenticationResult.
        switch (fromHttpRequest(request)) {
            case USER_COOKIE_MISSING:
            case USER_COOKIE_ALTERNATIVE_MISSING:
                // No cookie at all: fall back to Athenz principal authentication.
                super.filter(request, responseHandler);
                break;
            case USER_COOKIE_OK:
                return; // already authenticated upstream; nothing more to do
            case USER_COOKIE_INVALID:
                sendUnauthorized(responseHandler, "Your user cookie is invalid (either expired or tampered)");
                break;
        }
    }

    /** Verdicts of the upstream cookie check, keyed by the integer codes it emits. */
    private enum UserAuthenticationResult {
        USER_COOKIE_MISSING(0),
        USER_COOKIE_OK(1),
        USER_COOKIE_INVALID(-1),
        USER_COOKIE_ALTERNATIVE_MISSING(-2);

        // Wire value written by the upstream filter into the pass-thru attribute.
        final int statusCode;

        UserAuthenticationResult(int statusCode) {
            this.statusCode = statusCode;
        }
    }
}
class UserAuthWithAthenzPrincipalFilter extends AthenzPrincipalFilter { private final String userAuthenticationPassThruAttribute; @Inject public UserAuthWithAthenzPrincipalFilter(ZmsKeystore zmsKeystore, Executor executor, AthenzConfig config) { super(zmsKeystore, executor, config); this.userAuthenticationPassThruAttribute = config.userAuthenticationPassThruAttribute(); } @Override public void filter(DiscFilterRequest request, ResponseHandler responseHandler) { if (request.getMethod().equals("OPTIONS")) return; switch (fromHttpRequest(request)) { case USER_COOKIE_MISSING: case USER_COOKIE_ALTERNATIVE_MISSING: super.filter(request, responseHandler); break; case USER_COOKIE_OK: return; case USER_COOKIE_INVALID: sendUnauthorized(responseHandler, "Your user cookie is invalid (either expired or tampered)"); break; } } private enum UserAuthenticationResult { USER_COOKIE_MISSING(0), USER_COOKIE_OK(1), USER_COOKIE_INVALID(-1), USER_COOKIE_ALTERNATIVE_MISSING(-2); final int statusCode; UserAuthenticationResult(int statusCode) { this.statusCode = statusCode; } } }
// Athenz domain is set, so every zone must also configure an Athenz service (unless a spec-wide service default is given).
/**
 * Validates the Athenz configuration of this spec:
 * with no domain configured, no zone may declare a service; with a domain
 * configured but no spec-wide service default, every zone must declare one.
 *
 * @throws IllegalArgumentException on the first zone violating the contract
 */
private void validateAthenz() {
    if ( ! athenzDomain.isPresent()) {
        // No domain: a per-zone service would be meaningless.
        Optional<DeclaredZone> offending = zones().stream()
                                                  .filter(zone -> zone.athenzService().isPresent())
                                                  .findFirst();
        if (offending.isPresent())
            throw new IllegalArgumentException("Athenz service configured for zone: " + offending.get() +
                                               ", but Athenz domain is not configured");
    }
    else if ( ! athenzService.isPresent()) {
        // Domain set but no global default: every zone needs its own service.
        Optional<DeclaredZone> missing = zones().stream()
                                                .filter(zone -> ! zone.athenzService().isPresent())
                                                .findFirst();
        if (missing.isPresent())
            throw new IllegalArgumentException("Athenz domain is configured, but Athenz service not configured for zone: " +
                                               missing.get());
    }
}
/**
 * Validates the Athenz configuration of this spec:
 * with no domain configured, no zone may declare a service; with a domain
 * configured but no spec-wide service default, every zone must declare one.
 *
 * @throws IllegalArgumentException on the first zone violating the contract
 */
private void validateAthenz() {
    if (! athenzDomain.isPresent()) {
        // No domain: a per-zone service would be meaningless.
        for (DeclaredZone zone : zones()) {
            if(zone.athenzService().isPresent()) {
                throw new IllegalArgumentException("Athenz service configured for zone: " + zone +
                                                   ", but Athenz domain is not configured");
            }
        }
    } else if(! athenzService.isPresent()) {
        // Domain set but no global default: every zone needs its own service.
        for (DeclaredZone zone : zones()) {
            if(! zone.athenzService().isPresent()) {
                throw new IllegalArgumentException("Athenz domain is configured, but Athenz service not configured for zone: " +
                                                   zone);
            }
        }
    }
}
/**
 * An application's deployment specification: ordered deployment steps, upgrade
 * policy, change-blocking windows and optional Athenz configuration, typically
 * read from deployment.xml. Instances are immutable.
 */
class DeploymentSpec {

    /** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
    public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
                                                                  UpgradePolicy.defaultPolicy,
                                                                  Collections.emptyList(),
                                                                  Collections.emptyList(),
                                                                  "<deployment version='1.0'/>",
                                                                  Optional.empty(),
                                                                  Optional.empty());

    private final Optional<String> globalServiceId;
    private final UpgradePolicy upgradePolicy;
    private final List<ChangeBlocker> changeBlockers;
    private final List<Step> steps;
    private final String xmlForm;
    private final Optional<String> athenzDomain;
    private final Optional<String> athenzService;

    public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
                          List<ChangeBlocker> changeBlockers, List<Step> steps) {
        this(globalServiceId, upgradePolicy, changeBlockers, steps, null, Optional.empty(), Optional.empty());
    }

    public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
                          List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm,
                          Optional<String> athenzDomain, Optional<String> athenzService) {
        validateTotalDelay(steps);
        this.globalServiceId = globalServiceId;
        this.upgradePolicy = upgradePolicy;
        this.changeBlockers = changeBlockers;
        // Missing test/staging steps are added and reordered to the permissible order before storing.
        this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
        this.xmlForm = xmlForm;
        this.athenzDomain = athenzDomain;
        this.athenzService = athenzService;
        validateZones(this.steps);
        // NOTE(review): validateAthenz() is called here but its definition is elsewhere in this file
        // (not in this chunk). Contract: domain not configured -> no zone may configure a service;
        // domain configured -> all zones must configure a service (unless a spec-wide default is set).
        validateAthenz();
    }

    /** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
    private void validateTotalDelay(List<Step> steps) {
        long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
                                               .mapToLong(delay -> ((Delay)delay).duration().getSeconds())
                                               .sum();
        if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
            throw new IllegalArgumentException("The total delay specified is " +
                                               Duration.ofSeconds(totalDelaySeconds) +
                                               " but max 24 hours is allowed");
    }

    /** Throw an IllegalArgumentException if any production zone is declared multiple times */
    private void validateZones(List<Step> steps) {
        Set<DeclaredZone> zones = new HashSet<>();
        for (Step step : steps)
            for (DeclaredZone zone : step.zones())
                ensureUnique(zone, zones);
    }

    /** Adds the given zone to the set, throwing if it was already present */
    private void ensureUnique(DeclaredZone zone, Set<DeclaredZone> zones) {
        if ( ! zones.add(zone))
            throw new IllegalArgumentException(zone + " is listed twice in deployment.xml");
    }

    /** Adds missing required steps and reorders steps to a permissible order */
    private static List<Step> completeSteps(List<Step> steps) {
        // Deploying to prod implies a staging step; deploying to staging implies a test step.
        if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
            steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
            steps.add(new DeclaredZone(Environment.staging));
        }
        if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
            steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
            steps.add(new DeclaredZone(Environment.test));
        }

        // Test must come first, then staging, then everything else in declared order.
        DeclaredZone testStep = remove(Environment.test, steps);
        if (testStep != null)
            steps.add(0, testStep);
        DeclaredZone stagingStep = remove(Environment.staging, steps);
        if (stagingStep != null)
            steps.add(1, stagingStep);
        return steps;
    }

    /**
     * Removes the first occurrence of a deployment step to the given environment and returns it.
     *
     * @return the removed step, or null if it is not present
     */
    private static DeclaredZone remove(Environment environment, List<Step> steps) {
        for (int i = 0; i < steps.size(); i++) {
            if (steps.get(i).deploysTo(environment))
                return (DeclaredZone)steps.remove(i);
        }
        return null;
    }

    /** Returns the ID of the service to expose through global routing, if present */
    public Optional<String> globalServiceId() { return globalServiceId; }

    /** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
    public UpgradePolicy upgradePolicy() { return upgradePolicy; }

    /** Returns whether upgrade can occur at the given instant */
    public boolean canUpgradeAt(Instant instant) {
        return changeBlockers.stream().filter(block -> block.blocksVersions())
                                      .noneMatch(block -> block.window().includes(instant));
    }

    /** Returns whether an application revision change can occur at the given instant */
    public boolean canChangeRevisionAt(Instant instant) {
        return changeBlockers.stream().filter(block -> block.blocksRevisions())
                                      .noneMatch(block -> block.window().includes(instant));
    }

    /** Returns time windows where upgrades are disallowed */
    public List<ChangeBlocker> changeBlocker() { return changeBlockers; }

    /** Returns the deployment steps of this in the order they will be performed */
    public List<Step> steps() { return steps; }

    /** Returns all the DeclaredZone deployment steps in the order they are declared */
    public List<DeclaredZone> zones() {
        return steps.stream()
                .flatMap(step -> step.zones().stream())
                .collect(Collectors.toList());
    }

    /** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
    public String xmlForm() { return xmlForm; }

    /** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
    public boolean includes(Environment environment, Optional<RegionName> region) {
        for (Step step : steps)
            if (step.deploysTo(environment, region)) return true;
        return false;
    }

    /**
     * Creates a deployment spec from XML.
     *
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(Reader reader) {
        return new DeploymentSpecXmlReader().read(reader);
    }

    /**
     * Creates a deployment spec from XML.
     *
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(String xmlForm) {
        return fromXml(xmlForm, true);
    }

    /**
     * Creates a deployment spec from XML.
     *
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(String xmlForm, boolean validate) {
        return new DeploymentSpecXmlReader(validate).read(xmlForm);
    }

    /** Flattens a throwable's causal chain into one message string, skipping null and repeated messages */
    public static String toMessageString(Throwable t) {
        StringBuilder b = new StringBuilder();
        String lastMessage = null;
        String message;
        for (; t != null; t = t.getCause()) {
            message = t.getMessage();
            if (message == null) continue;
            if (message.equals(lastMessage)) continue;
            if (b.length() > 0) {
                b.append(": ");
            }
            b.append(message);
            lastMessage = message;
        }
        return b.toString();
    }

    /** Returns the athenz domain if configured */
    public Optional<String> athenzDomain() { return athenzDomain; }

    /** Returns the athenz service for environment/region if configured, falling back to the spec-wide default */
    public Optional<String> athenzService(Environment environment, RegionName region) {
        return zones().stream()
                .filter(zone -> zone.deploysTo(environment, Optional.of(region)))
                .findFirst()
                .map(DeclaredZone::athenzService)
                .orElse(athenzService);
    }

    /** This may be invoked by a continuous build */
    public static void main(String[] args) {
        if (args.length != 2 && args.length != 3) {
            System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
                               "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
            System.exit(1);
        }

        try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
            DeploymentSpec spec = DeploymentSpec.fromXml(reader);
            Environment environment = Environment.from(args[1]);
            Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
            if (spec.includes(environment, region))
                System.exit(0);
            else
                System.exit(1);
        }
        catch (Exception e) {
            System.err.println("Exception checking deployment spec: " + toMessageString(e));
            System.exit(1);
        }
    }

    /** A deployment step */
    public abstract static class Step {

        /** Returns whether this step deploys to the given region */
        public final boolean deploysTo(Environment environment) {
            return deploysTo(environment, Optional.empty());
        }

        /** Returns whether this step deploys to the given environment, and (if specified) region */
        public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);

        /** Returns the zones deployed to in this step */
        public List<DeclaredZone> zones() { return Collections.emptyList(); }

    }

    /** A deployment step which is to wait for some time before progressing to the next step */
    public static class Delay extends Step {

        private final Duration duration;

        public Delay(Duration duration) {
            this.duration = duration;
        }

        public Duration duration() { return duration; }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }

    }

    /** A deployment step which is to run deployment in a particular zone */
    public static class DeclaredZone extends Step {

        private final Environment environment;

        private Optional<RegionName> region;

        private final boolean active;

        private Optional<String> athenzService;

        public DeclaredZone(Environment environment) {
            this(environment, Optional.empty(), false);
        }

        public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
            this(environment, region, active, Optional.empty());
        }

        public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active,
                            Optional<String> athenzService) {
            // A region makes sense for prod only, and is mandatory there.
            if (environment != Environment.prod && region.isPresent())
                throw new IllegalArgumentException("Non-prod environments cannot specify a region");
            if (environment == Environment.prod && ! region.isPresent())
                throw new IllegalArgumentException("Prod environments must be specified with a region");
            this.environment = environment;
            this.region = region;
            this.active = active;
            this.athenzService = athenzService;
        }

        public Environment environment() { return environment; }

        /** The region name, or empty if not declared */
        public Optional<RegionName> region() { return region; }

        /** Returns whether this zone should receive production traffic */
        public boolean active() { return active; }

        public Optional<String> athenzService() { return athenzService; }

        @Override
        public List<DeclaredZone> zones() { return Collections.singletonList(this); }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) {
            if (environment != this.environment) return false;
            // An empty region argument matches any region.
            if (region.isPresent() && ! region.equals(this.region)) return false;
            return true;
        }

        // Identity is deliberately environment+region only; 'active' and 'athenzService'
        // are excluded so duplicate-zone detection ignores them (equals matches).
        @Override
        public int hashCode() {
            return Objects.hash(environment, region);
        }

        @Override
        public boolean equals(Object o) {
            if (o == this) return true;
            if ( ! (o instanceof DeclaredZone)) return false;
            DeclaredZone other = (DeclaredZone)o;
            if (this.environment != other.environment) return false;
            if ( ! this.region.equals(other.region())) return false;
            return true;
        }

        @Override
        public String toString() {
            return environment + ( region.isPresent() ? "." + region.get() : "");
        }

    }

    /** A deployment step which is to run deployment to multiple zones in parallel */
    public static class ParallelZones extends Step {

        private final List<DeclaredZone> zones;

        public ParallelZones(List<DeclaredZone> zones) {
            this.zones = ImmutableList.copyOf(zones);
        }

        @Override
        public List<DeclaredZone> zones() { return this.zones; }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) {
            return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof ParallelZones)) return false;
            ParallelZones that = (ParallelZones) o;
            return Objects.equals(zones, that.zones);
        }

        @Override
        public int hashCode() {
            return Objects.hash(zones);
        }

    }

    /** Controls when this application will be upgraded to new Vespa versions */
    public enum UpgradePolicy {
        /** Canary: Applications with this policy will upgrade before any other */
        canary,
        /** Default: Will upgrade after all canary applications upgraded successfully. The default. */
        defaultPolicy,
        /** Will upgrade after most default applications upgraded successfully */
        conservative
    }

    /** A blocking of changes in a given time window */
    public static class ChangeBlocker {

        private final boolean revision;
        private final boolean version;
        private final TimeWindow window;

        public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
            this.revision = revision;
            this.version = version;
            this.window = window;
        }

        /** Whether application revision changes are blocked in the window */
        public boolean blocksRevisions() { return revision; }

        /** Whether platform version upgrades are blocked in the window */
        public boolean blocksVersions() { return version; }

        public TimeWindow window() { return window; }

    }

}
class DeploymentSpec { /** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */ public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(), UpgradePolicy.defaultPolicy, Collections.emptyList(), Collections.emptyList(), "<deployment version='1.0'/>", Optional.empty(), Optional.empty()); private final Optional<String> globalServiceId; private final UpgradePolicy upgradePolicy; private final List<ChangeBlocker> changeBlockers; private final List<Step> steps; private final String xmlForm; private final Optional<AthenzDomain> athenzDomain; private final Optional<AthenzService> athenzService; public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<ChangeBlocker> changeBlockers, List<Step> steps) { this(globalServiceId, upgradePolicy, changeBlockers, steps, null, Optional.empty(), Optional.empty()); } public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { validateTotalDelay(steps); this.globalServiceId = globalServiceId; this.upgradePolicy = upgradePolicy; this.changeBlockers = changeBlockers; this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps))); this.xmlForm = xmlForm; this.athenzDomain = athenzDomain; this.athenzService = athenzService; validateZones(this.steps); validateAthenz(); } /** Throw an IllegalArgumentException if the total delay exceeds 24 hours */ private void validateTotalDelay(List<Step> steps) { long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay) .mapToLong(delay -> ((Delay)delay).duration().getSeconds()) .sum(); if (totalDelaySeconds > Duration.ofHours(24).getSeconds()) throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) + " but max 24 hours is allowed"); } /** Throw an 
IllegalArgumentException if any production zone is declared multiple times */ private void validateZones(List<Step> steps) { Set<DeclaredZone> zones = new HashSet<>(); for (Step step : steps) for (DeclaredZone zone : step.zones()) ensureUnique(zone, zones); } /* * Throw an IllegalArgumentException if Athenz configuration violates: * domain not configured -> no zone can configure service * domain configured -> all zones must configure service */ private void ensureUnique(DeclaredZone zone, Set<DeclaredZone> zones) { if ( ! zones.add(zone)) throw new IllegalArgumentException(zone + " is listed twice in deployment.xml"); } /** Adds missing required steps and reorders steps to a permissible order */ private static List<Step> completeSteps(List<Step> steps) { if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) && steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) { steps.add(new DeclaredZone(Environment.staging)); } if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) && steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) { steps.add(new DeclaredZone(Environment.test)); } DeclaredZone testStep = remove(Environment.test, steps); if (testStep != null) steps.add(0, testStep); DeclaredZone stagingStep = remove(Environment.staging, steps); if (stagingStep != null) steps.add(1, stagingStep); return steps; } /** * Removes the first occurrence of a deployment step to the given environment and returns it. 
* * @return the removed step, or null if it is not present */ private static DeclaredZone remove(Environment environment, List<Step> steps) { for (int i = 0; i < steps.size(); i++) { if (steps.get(i).deploysTo(environment)) return (DeclaredZone)steps.remove(i); } return null; } /** Returns the ID of the service to expose through global routing, if present */ public Optional<String> globalServiceId() { return globalServiceId; } /** Returns the upgrade policy of this, which is defaultPolicy if none is specified */ public UpgradePolicy upgradePolicy() { return upgradePolicy; } /** Returns whether upgrade can occur at the given instant */ public boolean canUpgradeAt(Instant instant) { return changeBlockers.stream().filter(block -> block.blocksVersions()) .noneMatch(block -> block.window().includes(instant)); } /** Returns whether an application revision change can occur at the given instant */ public boolean canChangeRevisionAt(Instant instant) { return changeBlockers.stream().filter(block -> block.blocksRevisions()) .noneMatch(block -> block.window().includes(instant)); } /** Returns time windows where upgrades are disallowed */ public List<ChangeBlocker> changeBlocker() { return changeBlockers; } /** Returns the deployment steps of this in the order they will be performed */ public List<Step> steps() { return steps; } /** Returns all the DeclaredZone deployment steps in the order they are declared */ public List<DeclaredZone> zones() { return steps.stream() .flatMap(step -> step.zones().stream()) .collect(Collectors.toList()); } /** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */ public String xmlForm() { return xmlForm; } /** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */ public boolean includes(Environment environment, Optional<RegionName> region) { for (Step step : steps) if (step.deploysTo(environment, region)) return true; return false; } /** * Creates a deployment 
spec from XML. * * @throws IllegalArgumentException if the XML is invalid */ public static DeploymentSpec fromXml(Reader reader) { return new DeploymentSpecXmlReader().read(reader); } /** * Creates a deployment spec from XML. * * @throws IllegalArgumentException if the XML is invalid */ public static DeploymentSpec fromXml(String xmlForm) { return fromXml(xmlForm, true); } /** * Creates a deployment spec from XML. * * @throws IllegalArgumentException if the XML is invalid */ public static DeploymentSpec fromXml(String xmlForm, boolean validate) { return new DeploymentSpecXmlReader(validate).read(xmlForm); } public static String toMessageString(Throwable t) { StringBuilder b = new StringBuilder(); String lastMessage = null; String message; for (; t != null; t = t.getCause()) { message = t.getMessage(); if (message == null) continue; if (message.equals(lastMessage)) continue; if (b.length() > 0) { b.append(": "); } b.append(message); lastMessage = message; } return b.toString(); } /** Returns the athenz domain if configured */ public Optional<AthenzDomain> athenzDomain() { return athenzDomain; } /** Returns the athenz service for environment/region if configured */ public Optional<AthenzService> athenzService(Environment environment, RegionName region) { AthenzService athenzService = zones().stream() .filter(zone -> zone.deploysTo(environment, Optional.of(region))) .findFirst() .flatMap(DeclaredZone::athenzService) .orElse(this.athenzService.orElse(null)); return Optional.ofNullable(athenzService); } /** This may be invoked by a continuous build */ public static void main(String[] args) { if (args.length != 2 && args.length != 3) { System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" 
+ "Returns 0 if the specified zone matches the deployment spec, 1 otherwise"); System.exit(1); } try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) { DeploymentSpec spec = DeploymentSpec.fromXml(reader); Environment environment = Environment.from(args[1]); Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty(); if (spec.includes(environment, region)) System.exit(0); else System.exit(1); } catch (Exception e) { System.err.println("Exception checking deployment spec: " + toMessageString(e)); System.exit(1); } } /** A deployment step */ public abstract static class Step { /** Returns whether this step deploys to the given region */ public final boolean deploysTo(Environment environment) { return deploysTo(environment, Optional.empty()); } /** Returns whether this step deploys to the given environment, and (if specified) region */ public abstract boolean deploysTo(Environment environment, Optional<RegionName> region); /** Returns the zones deployed to in this step */ public List<DeclaredZone> zones() { return Collections.emptyList(); } } /** A deployment step which is to wait for some time before progressing to the next step */ public static class Delay extends Step { private final Duration duration; public Delay(Duration duration) { this.duration = duration; } public Duration duration() { return duration; } @Override public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; } } /** A deployment step which is to run deployment in a particular zone */ public static class DeclaredZone extends Step { private final Environment environment; private Optional<RegionName> region; private final boolean active; private Optional<AthenzService> athenzService; public DeclaredZone(Environment environment) { this(environment, Optional.empty(), false); } public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) { this(environment, region, 
active, Optional.empty()); } public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active, Optional<AthenzService> athenzService) { if (environment != Environment.prod && region.isPresent()) throw new IllegalArgumentException("Non-prod environments cannot specify a region"); if (environment == Environment.prod && ! region.isPresent()) throw new IllegalArgumentException("Prod environments must be specified with a region"); this.environment = environment; this.region = region; this.active = active; this.athenzService = athenzService; } public Environment environment() { return environment; } /** The region name, or empty if not declared */ public Optional<RegionName> region() { return region; } /** Returns whether this zone should receive production traffic */ public boolean active() { return active; } public Optional<AthenzService> athenzService() { return athenzService; } @Override public List<DeclaredZone> zones() { return Collections.singletonList(this); } @Override public boolean deploysTo(Environment environment, Optional<RegionName> region) { if (environment != this.environment) return false; if (region.isPresent() && ! region.equals(this.region)) return false; return true; } @Override public int hashCode() { return Objects.hash(environment, region); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! (o instanceof DeclaredZone)) return false; DeclaredZone other = (DeclaredZone)o; if (this.environment != other.environment) return false; if ( ! this.region.equals(other.region())) return false; return true; } @Override public String toString() { return environment + ( region.isPresent() ? "." 
+ region.get() : ""); } } /** A deployment step which is to run deployment to multiple zones in parallel */ public static class ParallelZones extends Step { private final List<DeclaredZone> zones; public ParallelZones(List<DeclaredZone> zones) { this.zones = ImmutableList.copyOf(zones); } @Override public List<DeclaredZone> zones() { return this.zones; } @Override public boolean deploysTo(Environment environment, Optional<RegionName> region) { return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region)); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ParallelZones)) return false; ParallelZones that = (ParallelZones) o; return Objects.equals(zones, that.zones); } @Override public int hashCode() { return Objects.hash(zones); } } /** Controls when this application will be upgraded to new Vespa versions */ public enum UpgradePolicy { /** Canary: Applications with this policy will upgrade before any other */ canary, /** Default: Will upgrade after all canary applications upgraded successfully. The default. */ defaultPolicy, /** Will upgrade after most default applications upgraded successfully */ conservative } /** A blocking of changes in a given time window */ public static class ChangeBlocker { private final boolean revision; private final boolean version; private final TimeWindow window; public ChangeBlocker(boolean revision, boolean version, TimeWindow window) { this.revision = revision; this.version = version; this.window = window; } public boolean blocksRevisions() { return revision; } public boolean blocksVersions() { return version; } public TimeWindow window() { return window; } } }
This may hide bugs, could we make it more specific?
private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); if (!potentialDimensionName(arg2)) { return node; } try { String dimension = ((ReferenceNode) arg2).getName(); Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (verifyTensorAndDimension(value, dimension)) { return replaceMaxAndMinFunction(node); } } catch (Exception e) { } return node; }
} catch (Exception e) {
private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); Optional<String> dimension = dimensionName(node.children().get(1)); if (dimension.isPresent()) { try { Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (isTensorWithDimension(value, dimension.get())) { return replaceMaxAndMinFunction(node); } } catch (IllegalArgumentException e) { } } return node; }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private boolean potentialDimensionName(ExpressionNode arg) { return arg instanceof ReferenceNode && ((ReferenceNode) arg).children().size() == 0; } private boolean verifyTensorAndDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals("constant")) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); addIfConstantInRankProfile(name, node, context); 
addIfConstantInRankingConstants(name, node, context); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) { context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = macro.getRankingExpression().getRoot(); Context 
macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private Optional<String> dimensionName(ExpressionNode arg) { if (arg instanceof ReferenceNode && ((ReferenceNode)arg).children().size() == 0) { return Optional.of(((ReferenceNode) arg).getName()); } return Optional.empty(); } private boolean isTensorWithDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); if (a == null) { return; } Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals(ConstantTensorTransformer.CONSTANT)) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); 
addIfConstantInRankProfile(name, node, context); addIfConstantInRankingConstants(name, node, context); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) { context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = 
macro.getRankingExpression().getRoot(); Context macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
Since the next line (the cast) requires knowledge that the internal of this checks instanceof I suggest inlining this.
private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); if (!potentialDimensionName(arg2)) { return node; } try { String dimension = ((ReferenceNode) arg2).getName(); Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (verifyTensorAndDimension(value, dimension)) { return replaceMaxAndMinFunction(node); } } catch (Exception e) { } return node; }
if (!potentialDimensionName(arg2)) {
private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); Optional<String> dimension = dimensionName(node.children().get(1)); if (dimension.isPresent()) { try { Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (isTensorWithDimension(value, dimension.get())) { return replaceMaxAndMinFunction(node); } } catch (IllegalArgumentException e) { } } return node; }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private boolean potentialDimensionName(ExpressionNode arg) { return arg instanceof ReferenceNode && ((ReferenceNode) arg).children().size() == 0; } private boolean verifyTensorAndDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals("constant")) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); addIfConstantInRankProfile(name, node, context); 
addIfConstantInRankingConstants(name, node, context); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) { context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = macro.getRankingExpression().getRoot(); Context 
macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private Optional<String> dimensionName(ExpressionNode arg) { if (arg instanceof ReferenceNode && ((ReferenceNode)arg).children().size() == 0) { return Optional.of(((ReferenceNode) arg).getName()); } return Optional.empty(); } private boolean isTensorWithDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); if (a == null) { return; } Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals(ConstantTensorTransformer.CONSTANT)) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); 
addIfConstantInRankProfile(name, node, context); addIfConstantInRankingConstants(name, node, context); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) { context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = 
macro.getRankingExpression().getRoot(); Context macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
Or, alternatively change it to be dimensionName(arg2) and return an Optional which is empty if it isn't a dimension name.
private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); if (!potentialDimensionName(arg2)) { return node; } try { String dimension = ((ReferenceNode) arg2).getName(); Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (verifyTensorAndDimension(value, dimension)) { return replaceMaxAndMinFunction(node); } } catch (Exception e) { } return node; }
if (!potentialDimensionName(arg2)) {
private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); Optional<String> dimension = dimensionName(node.children().get(1)); if (dimension.isPresent()) { try { Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (isTensorWithDimension(value, dimension.get())) { return replaceMaxAndMinFunction(node); } } catch (IllegalArgumentException e) { } } return node; }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private boolean potentialDimensionName(ExpressionNode arg) { return arg instanceof ReferenceNode && ((ReferenceNode) arg).children().size() == 0; } private boolean verifyTensorAndDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals("constant")) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); addIfConstantInRankProfile(name, node, context); 
addIfConstantInRankingConstants(name, node, context); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) { context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = macro.getRankingExpression().getRoot(); Context 
macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private Optional<String> dimensionName(ExpressionNode arg) { if (arg instanceof ReferenceNode && ((ReferenceNode)arg).children().size() == 0) { return Optional.of(((ReferenceNode) arg).getName()); } return Optional.empty(); } private boolean isTensorWithDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); if (a == null) { return; } Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals(ConstantTensorTransformer.CONSTANT)) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); 
addIfConstantInRankProfile(name, node, context); addIfConstantInRankingConstants(name, node, context); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) { context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = 
macro.getRankingExpression().getRoot(); Context macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
Perhaps "constant" should be a constant string in ConstantTensorTransformer to make the dependency explicit.
private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals("constant")) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); addIfConstantInRankProfile(name, node, context); addIfConstantInRankingConstants(name, node, context); }
if (!node.getName().equals("constant")) {
private void addIfConstant(ReferenceNode node, Context context) { if (!node.getName().equals(ConstantTensorTransformer.CONSTANT)) { return; } if (node.children().size() != 1) { return; } ExpressionNode child = node.children().get(0); while (child instanceof CompositeNode && ((CompositeNode) child).children().size() > 0) { child = ((CompositeNode) child).children().get(0); } String name = child.toString(); addIfConstantInRankProfile(name, node, context); addIfConstantInRankingConstants(name, node, context); }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); if (!potentialDimensionName(arg2)) { return node; } try { String dimension = ((ReferenceNode) arg2).getName(); Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (verifyTensorAndDimension(value, dimension)) { return replaceMaxAndMinFunction(node); } } catch (Exception e) { } return node; } private boolean potentialDimensionName(ExpressionNode arg) { return arg instanceof ReferenceNode && ((ReferenceNode) arg).children().size() == 0; } private boolean verifyTensorAndDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) { 
context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = macro.getRankingExpression().getRoot(); Context macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = 
macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
class TensorTransformer extends ExpressionTransformer { private Search search; private RankProfile rankprofile; private Map<String, RankProfile.Macro> macros; public TensorTransformer(RankProfile rankprofile) { this.rankprofile = rankprofile; this.search = rankprofile.getSearch(); this.macros = rankprofile.getMacros(); } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof CompositeNode) { node = transformChildren((CompositeNode) node); } if (node instanceof FunctionNode) { node = transformFunctionNode((FunctionNode) node); } return node; } private ExpressionNode transformFunctionNode(FunctionNode node) { switch (node.getFunction()) { case min: case max: return transformMaxAndMinFunctionNode(node); } return node; } /** * Transforms max and min functions if it can be proven that the first * argument resolves to a tensor and the second argument is a valid * dimension in the tensor. If these do not hold, the node will not * be transformed. * * The test for whether or not the first argument resolves to a tensor * is to evaluate that expression. All values used in the expression * is bound to a context with dummy values with enough information to * deduce tensor types. * * There is currently no guarantee that all cases will be found. For * instance, if-statements are problematic. 
*/ private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node) { if (node.children().size() != 2) { return node; } ExpressionNode arg1 = node.children().get(0); Optional<String> dimension = dimensionName(node.children().get(1)); if (dimension.isPresent()) { try { Context context = buildContext(arg1); Value value = arg1.evaluate(context); if (isTensorWithDimension(value, dimension.get())) { return replaceMaxAndMinFunction(node); } } catch (IllegalArgumentException e) { } } return node; } private Optional<String> dimensionName(ExpressionNode arg) { if (arg instanceof ReferenceNode && ((ReferenceNode)arg).children().size() == 0) { return Optional.of(((ReferenceNode) arg).getName()); } return Optional.empty(); } private boolean isTensorWithDimension(Value value, String dimension) { if (value instanceof TensorValue) { Tensor tensor = ((TensorValue) value).asTensor(); TensorType type = tensor.type(); return type.dimensionNames().contains(dimension); } return false; } private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) { ExpressionNode arg1 = node.children().get(0); ExpressionNode arg2 = node.children().get(1); TensorFunctionNode.TensorFunctionExpressionNode expression = TensorFunctionNode.wrapArgument(arg1); Reduce.Aggregator aggregator = Reduce.Aggregator.valueOf(node.getFunction().name()); String dimension = ((ReferenceNode) arg2).getName(); return new TensorFunctionNode(new Reduce(expression, aggregator, dimension)); } /** * Creates an evaluation context by iterating through the expression tree, and * adding dummy values with correct types to the context. 
*/ private Context buildContext(ExpressionNode node) { Context context = new MapContext(); addRoot(node, context); return context; } private Value emptyStringValue() { return new StringValue(""); } private Value emptyDoubleValue() { return new DoubleValue(0.0); } private Value emptyTensorValue(TensorType type) { Tensor empty = Tensor.Builder.of(type).build(); return new TensorValue(empty); } private void addRoot(ExpressionNode node, Context context) { addChildren(node, context); if (node instanceof ReferenceNode) { ReferenceNode referenceNode = (ReferenceNode) node; addIfAttribute(referenceNode, context); addIfConstant(referenceNode, context); addIfQuery(referenceNode, context); addIfTensorFrom(referenceNode, context); addIfMacro(referenceNode, context); } } private void addChildren(ExpressionNode node, Context context) { if (node instanceof CompositeNode) { List<ExpressionNode> children = ((CompositeNode) node).children(); for (ExpressionNode child : children) { addRoot(child, context); } } } private void addIfAttribute(ReferenceNode node, Context context) { if (!node.getName().equals("attribute")) { return; } if (node.children().size() == 0) { return; } String attribute = node.children().get(0).toString(); Attribute a = search.getAttribute(attribute); if (a == null) { return; } Value v; if (a.getType() == Attribute.Type.STRING) { v = emptyStringValue(); } else if (a.getType() == Attribute.Type.TENSOR) { v = emptyTensorValue(a.tensorType().orElseThrow(RuntimeException::new)); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } private void addIfConstantInRankProfile(String name, ReferenceNode node, Context context) { if (rankprofile.getConstants().containsKey(name)) { context.put(node.toString(), rankprofile.getConstants().get(name)); } } private void addIfConstantInRankingConstants(String name, ReferenceNode node, Context context) { for (RankingConstant rankingConstant : search.getRankingConstants()) { if (rankingConstant.getName().equals(name)) 
{ context.put(node.toString(), emptyTensorValue(rankingConstant.getTensorType())); } } } private void addIfQuery(ReferenceNode node, Context context) { if (!node.getName().equals("query")) { return; } if (node.children().size() != 1) { return; } String name = node.children().get(0).toString(); if (rankprofile.getQueryFeatureTypes().containsKey(name)) { String type = rankprofile.getQueryFeatureTypes().get(name); Value v; if (type.contains("tensor")) { v = emptyTensorValue(TensorType.fromSpec(type)); } else if (type.equalsIgnoreCase("string")) { v = emptyStringValue(); } else { v = emptyDoubleValue(); } context.put(node.toString(), v); } } private void addIfTensorFrom(ReferenceNode node, Context context) { if (!node.getName().startsWith("tensorFrom")) { return; } if (node.children().size() < 1 || node.children().size() > 2) { return; } ExpressionNode source = node.children().get(0); if (source instanceof CompositeNode && ((CompositeNode) source).children().size() > 0) { source = ((CompositeNode) source).children().get(0); } String dimension = source.toString(); if (node.children().size() == 2) { dimension = node.children().get(1).toString(); } TensorType type = (new TensorType.Builder()).mapped(dimension).build(); context.put(node.toString(), emptyTensorValue(type)); } private void addIfMacro(ReferenceNode node, Context context) { RankProfile.Macro macro = macros.get(node.getName()); if (macro == null) { return; } ExpressionNode root = macro.getRankingExpression().getRoot(); Context macroContext = buildContext(root); addMacroArguments(node, context, macro, macroContext); Value value = root.evaluate(macroContext); context.put(node.toString(), value); } private void addMacroArguments(ReferenceNode node, Context context, RankProfile.Macro macro, Context macroContext) { if (macro.getFormalParams().size() > 0 && node.children().size() > 0) { for (int i = 0; i < macro.getFormalParams().size() && i < node.children().size(); ++i) { String param = 
macro.getFormalParams().get(i); ExpressionNode argumentExpression = node.children().get(i); Value arg = argumentExpression.evaluate(context); macroContext.put(param, arg); } } } }
Adds a new way to obtain a LockedApplication: run actions under the application lock only if the application exists.
public void lockedIfPresent(ApplicationId applicationId, Consumer<LockedApplication> actions) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(actions); } }
}
public void lockedIfPresent(ApplicationId applicationId, Consumer<LockedApplication> actions) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(actions); } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, RotationRepository rotationRepository, AthenzClientFactory zmsClientFactory, NameService nameService, ConfigServerClient configserverClient, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.rotationRepository = rotationRepository; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) lockedIfPresent(application.id(), this::store); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public 
List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! 
(id.instance().value().equals("default") || id.instance().value().startsWith("default-pr"))) throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment"); try (Lock lock = lock(id)) { if (get(id).isPresent()) throw new IllegalArgumentException("An application with id '" + id + "' already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (tenant.get().isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); if (tenant.get().isAthensTenant()) { ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()); try { zmsClient.deleteApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } catch (ZmsException ignored) { } zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } LockedApplication application = new LockedApplication(new Application(id), lock); store(application); log.info("Created " + application); return application; } } /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId).map(application1 -> new LockedApplication(application1, lock)).orElse(new LockedApplication( new Application(applicationId), lock) ); Version version; if (options.deployCurrentVersion) version = application.versionIn(zone, controller); else if (canDeployDirectlyTo(zone, options)) version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed()) return unexpectedDeployment(applicationId, zone, applicationPackage); else version = application.deployVersionIn(zone, controller); Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone); ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); if ( ! options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null) application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value()); if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange) application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision))); if ( ! 
canDeployDirectlyTo(zone, options) && jobType.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get()); application = application.withJobTriggering(jobType.get(), application.deploying(), triggering.at(), version, Optional.of(revision), triggering.reason()); } application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if ( ! canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying())) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.deploying().get() + " is not tested"); Deployment existingDeployment = application.deployments().get(zone); if (existingDeployment != null && existingDeployment.version().isAfter(version)) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } DeploymentId deploymentId = new DeploymentId(applicationId, zone); ApplicationRotation rotationInDns = registerRotationInDns(deploymentId, getOrAssignRotation(deploymentId, applicationPackage)); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(deploymentId, options, rotationInDns.cnames(), rotationInDns.rotations(), applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, revision, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse()); } } private ActivateResult unexpectedDeployment(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = 
clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<Zone> zone = job.zone(controller.system()); if ( ! 
job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(Zone::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. */ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> screwDriverBuildJob) { if ( ! 
screwDriverBuildJob.isPresent()) return ApplicationRevision.from(applicationPackage.hash()); GitRevision gitRevision = screwDriverBuildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationRevision.from(applicationPackage.hash()); return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } private ApplicationRotation registerRotationInDns(DeploymentId deploymentId, ApplicationRotation applicationRotation) { ApplicationAlias alias = new ApplicationAlias(deploymentId.applicationId()); if (applicationRotation.rotations().isEmpty()) return applicationRotation; Rotation rotation = applicationRotation.rotations().iterator().next(); String endpointName = alias.toString(); try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, endpointName); if (!record.isPresent()) { RecordId recordId = nameService.createCname(endpointName, rotation.rotationName); log.info("Registered mapping with record ID " + recordId.id() + ": " + endpointName + " -> " + rotation.rotationName); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } return new ApplicationRotation(Collections.singleton(endpointName), Collections.singleton(rotation)); } private ApplicationRotation getOrAssignRotation(DeploymentId deploymentId, ApplicationPackage applicationPackage) { if (deploymentId.zone().environment().equals(Environment.prod)) { return new ApplicationRotation(Collections.emptySet(), rotationRepository.getOrAssignRotation(deploymentId.applicationId(), applicationPackage.deploymentSpec())); } else { return new ApplicationRotation(Collections.emptySet(), Collections.emptySet()); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { 
List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.FINE, "Failed to get endpoint information for " + deploymentId, e); return Optional.empty(); } } /** * Deletes the application with this id * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the application does not exist */ public void deleteApplication(ApplicationId id, Optional<NToken> token) { if ( ! controller.applications().get(id).isPresent()) throw new NotExistsException("Could not delete application '" + id + "': Application not found"); lockedOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! 
token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (tenant.isAthensTenant()) zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); db.deleteApplication(id); log.info("Deleted " + application); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId Id of the application to lock and get. * @param actions Things to do with the locked application. */ /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId Id of the application to lock and require. * @param actions Things to do with the locked application. */ public void lockedOrThrow(ApplicationId applicationId, Consumer<LockedApplication> actions) { try (Lock lock = lock(applicationId)) { actions.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! 
get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } public void restart(DeploymentId deploymentId) { try { configserverClient.restart(deploymentId, Optional.empty()); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } public void restartHost(DeploymentId deploymentId, Hostname hostname) { try { configserverClient.restart(deploymentId, Optional.of(hostname)); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, Zone zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application application, Zone zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! 
DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockedOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, Zone zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(Zone zone, DeployOptions options) { return ! options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } private static final class ApplicationRotation { private final ImmutableSet<String> cnames; private final ImmutableSet<Rotation> rotations; public ApplicationRotation(Set<String> cnames, Set<Rotation> rotations) { this.cnames = ImmutableSet.copyOf(cnames); this.rotations = ImmutableSet.copyOf(rotations); } public Set<String> cnames() { return cnames; } public Set<Rotation> rotations() { return rotations; } } }
/**
 * Manages the applications known to this controller: CRUD, deployment, rotation/DNS
 * registration and deactivation. All mutations go through a curator lock per application
 * (see {@link #lock(ApplicationId)}), and are persisted via {@link #store(LockedApplication)}.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          RotationRepository rotationRepository,
                          AthenzClientFactory zmsClientFactory,
                          NameService nameService, ConfigServerClient configserverClient,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.rotationRepository = rotationRepository;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store every application at startup (e.g. to migrate persisted format under lock)
        for (Application application : db.listApplications())
            lockedIfPresent(application.id(), this::store);
    }

    /** Returns the application with the given id, or an empty Optional if it is not present */
    // NOTE(review): Javadoc previously said "or null"; the method returns Optional and never null.
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // NOTE(review): replaceAll takes a regex; the unescaped dots here match any
                // character. Works for the expected suffix but is looser than intended —
                // consider replace(".vespa.yahooapis.com", "") or escaping the dots.
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host serving both a global and a non-global endpoint identifies the
                    // canonical service endpoint behind the global rotation.
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr")))
            throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment");
        try (Lock lock = lock(id)) {
            if (get(id).isPresent())
                throw new IllegalArgumentException("An application with id '" + id + "' already exists");
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
            if ( ! tenant.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Dashes and underscores are interchangeable in some identifier contexts; reject collisions
            if (get(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            if (tenant.get().isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            if (tenant.get().isAthensTenant()) {
                ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
                try {
                    // Best-effort cleanup of any stale ZMS registration before (re-)adding
                    zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
                                                new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
                } catch (ZmsException ignored) {
                    // application was not registered in ZMS; nothing to delete
                }
                zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                         new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            LockedApplication application = new LockedApplication(new Application(id), lock);
            store(application);
            log.info("Created " + application);
            return application;
        }
    }

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, Zone zone,
                                            ApplicationPackage applicationPackage, DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(application1 -> new LockedApplication(application1, lock))
                    .orElse(new LockedApplication(new Application(applicationId), lock));

            // Determine the Vespa version to deploy
            Version version;
            if (options.deployCurrentVersion)
                version = application.versionIn(zone, controller);
            else if (canDeployDirectlyTo(zone, options))
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed())
                return unexpectedDeployment(applicationId, zone, applicationPackage);
            else
                version = application.deployVersionIn(zone, controller);

            Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone);
            ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);

            if ( ! options.deployCurrentVersion) {
                // Update application with the new package's spec and the triggering metadata
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null)
                    application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value());
                // NOTE(review): an application-change deployment that also changes the platform
                // version is still recorded only as an ApplicationChange of the new revision —
                // the summary can be incomplete; the per-job status is the source of truth.
                if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange)
                    application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision)));
                if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) {
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get());
                    application = application.withJobTriggering(jobType.get(), application.deploying(),
                                                                triggering.at(), version, Optional.of(revision),
                                                                triggering.reason());
                }
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // persist pre-deployment state: deployment may fail after this point
            }

            // Validate the change being deployed, unless it is a direct (manual) deployment
            if ( ! canDeployDirectlyTo(zone, options)) {
                if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.deploying().get() + " is not tested");
                Deployment existingDeployment = application.deployments().get(zone);
                if (existingDeployment != null && existingDeployment.version().isAfter(version))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
            }

            DeploymentId deploymentId = new DeploymentId(applicationId, zone);
            ApplicationRotation rotationInDns = registerRotationInDns(deploymentId,
                                                                      getOrAssignRotation(deploymentId, applicationPackage));
            options = withVersion(version, options);

            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(deploymentId, options, rotationInDns.cnames(),
                                               rotationInDns.rotations(), applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, revision, version, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse());
        }
    }

    /** Builds a warning-only ActivateResult for a deployment request that was not expected and is ignored. */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse);
    }

    /**
     * Deactivates production deployments in zones no longer listed in deployment.xml.
     * Requires the deployment-removal validation override unless nothing is removed.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes production job statuses for zones no longer referenced by deployment.xml. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<Zone> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(Zone::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present.
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** A placeholder JobRun (id -1, empty revision/reason) for externally triggered jobs. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant());
    }

    /** Returns a copy of the given options with the Vespa version pinned to the given version. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /**
     * Derives the application revision from the package hash, attaching the git source
     * revision when the Screwdriver build job provides a complete one.
     */
    private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                             Optional<ScrewdriverBuildJob> screwDriverBuildJob) {
        if ( ! screwDriverBuildJob.isPresent())
            return ApplicationRevision.from(applicationPackage.hash());
        GitRevision gitRevision = screwDriverBuildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationRevision.from(applicationPackage.hash());
        return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(),
                                                                                      gitRevision.branch.id(),
                                                                                      gitRevision.commit.id()));
    }

    /**
     * Registers a CNAME from the application alias to the first assigned rotation, unless a
     * record already exists. DNS failures are logged and swallowed (registration is best-effort).
     */
    private ApplicationRotation registerRotationInDns(DeploymentId deploymentId, ApplicationRotation applicationRotation) {
        ApplicationAlias alias = new ApplicationAlias(deploymentId.applicationId());
        if (applicationRotation.rotations().isEmpty()) return applicationRotation;

        // Only one rotation is registered in DNS, even if several are assigned
        Rotation rotation = applicationRotation.rotations().iterator().next();
        String endpointName = alias.toString();
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, endpointName);
            if (!record.isPresent()) {
                RecordId recordId = nameService.createCname(endpointName, rotation.rotationName);
                log.info("Registered mapping with record ID " + recordId.id() + ": " +
                         endpointName + " -> " + rotation.rotationName);
            }
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
        return new ApplicationRotation(Collections.singleton(endpointName), Collections.singleton(rotation));
    }

    /** Assigns rotations only for production deployments; other environments get none. */
    private ApplicationRotation getOrAssignRotation(DeploymentId deploymentId, ApplicationPackage applicationPackage) {
        if (deploymentId.zone().environment().equals(Environment.prod)) {
            return new ApplicationRotation(Collections.emptySet(),
                                           rotationRepository.getOrAssignRotation(deploymentId.applicationId(),
                                                                                  applicationPackage.deploymentSpec()));
        } else {
            return new ApplicationRotation(Collections.emptySet(), Collections.emptySet());
        }
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.FINE, "Failed to get endpoint information for " + deploymentId, e);
            return Optional.empty();
        }
    }

    /**
     * Deletes the application with this id
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the application does not exist
     */
    public void deleteApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! controller.applications().get(id).isPresent())
            throw new NotExistsException("Could not delete application '" + id + "': Application not found");

        lockedOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            // Athens tenants: remove the ZMS registration before deleting locally
            if (tenant.isAthensTenant())
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                                .deleteApplication(tenant.getAthensDomain().get(),
                                                   new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            db.deleteApplication(id);
            log.info("Deleted " + application);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * NOTE(review): this Javadoc describes lockedIfPresent (called from the constructor),
     * but no such method is defined in this class body — verify it wasn't accidentally removed.
     *
     * @param applicationId Id of the application to lock and get.
     * @param actions Things to do with the locked application.
     */
    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId Id of the application to lock and require.
     * @param actions Things to do with the locked application.
     */
    public void lockedOrThrow(ApplicationId applicationId, Consumer<LockedApplication> actions) {
        try (Lock lock = lock(applicationId)) {
            actions.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards a completed-job report to the deployment trigger; ignores reports for unknown applications. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /** Restarts all services of the given deployment. */
    public void restart(DeploymentId deploymentId) {
        try {
            configserverClient.restart(deploymentId, Optional.empty());
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Restarts the services of the given deployment on one host only. */
    public void restartHost(DeploymentId deploymentId, Hostname hostname) {
        try {
            configserverClient.restart(deploymentId, Optional.of(hostname));
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, Zone zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    private void deactivate(Application application, Zone zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        // Optionally guard against deactivating deployments that have not yet expired
        if (requireThatDeploymentHasExpired && deployment.isPresent()
            && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;

        lockedOrThrow(application.id(), lockedApplication ->
                store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, Zone zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // ok; already gone
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the same id with all dashes in the application name replaced by underscores. */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(Zone zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Immutable value holding the CNAMEs and rotations assigned to an application deployment. */
    private static final class ApplicationRotation {

        private final ImmutableSet<String> cnames;
        private final ImmutableSet<Rotation> rotations;

        public ApplicationRotation(Set<String> cnames, Set<Rotation> rotations) {
            this.cnames = ImmutableSet.copyOf(cnames);
            this.rotations = ImmutableSet.copyOf(rotations);
        }

        public Set<String> cnames() { return cnames; }
        public Set<Rotation> rotations() { return rotations; }

    }

}
Rename: cpuPercentageOfHost -> cpuUsageRatioOfHost, since the value is a usage ratio in [0, 1], not a percentage.
    /**
     * Reads the container's Docker stats, derives CPU/memory/disk/network metrics and
     * pushes them to the container's metrics receiver. Returns silently when there is no
     * node spec yet, the container is absent, or stats could not be fetched.
     */
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;

        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();

        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw counters from the Docker stats API (unchecked map navigation; assumes the
        // documented cpu_stats/memory_stats layout — TODO confirm against Docker API version used)
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Scale host-wide CPU usage up to the share of the host allocated to this node:
        // host ratio * (total cores / allocated cores)
        double cpuUsageRatioOfHost = lastCpuMetric.getCpuUsageRatio();
        double cpuUsageRatioOfAllocated = totalNumCpuCores * cpuUsageRatioOfHost / nodeSpec.minCpuCores;
        // Kernel-mode share expressed as a fraction of the allocated usage
        double cpuKernelUsageRatioOfAllocated = cpuUsageRatioOfAllocated * lastCpuMetric.getCpuKernelUsageRatio();

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; // exclude page cache
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        // Utilization metrics are reported as percentages (ratio * 100)
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metrics entry per network interface, dimensioned by interface name.
        // NOTE(review): the shared dimensionsBuilder is mutated here on every iteration —
        // assumes Builder.add overwrites an existing "interface" key; verify.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    /**
     * Reads the container's Docker stats, derives CPU/memory/disk/network metrics and
     * pushes them to the container's metrics receiver. Returns silently when there is no
     * node spec yet, the container is absent, or stats could not be fetched.
     */
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;

        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();

        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw counters from the Docker stats API (unchecked map navigation; assumes the
        // documented cpu_stats/memory_stats layout — TODO confirm against Docker API version used)
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Fraction of the host's cores allocated to this node; host-wide usage ratios are
        // divided by it to express utilization relative to the allocation.
        // NOTE(review): assumes minCpuCores is a floating-point type — if it were an int,
        // this division would truncate (and could be 0); verify ContainerNodeSpec.
        final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; // exclude page cache
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        // Utilization metrics are reported as percentages (ratio * 100)
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metrics entry per network interface, dimensioned by interface name.
        // NOTE(review): the shared dimensionsBuilder is mutated here on every iteration —
        // assumes Builder.add overwrites an existing "interface" key; verify.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }
/**
 * Agent responsible for a single node's Docker container: runs a converge loop that
 * reconciles the container's actual state with the wanted state from the node repository
 * (start/remove/restart the container, report attributes back, orchestrate suspend/resume).
 * The loop runs on a dedicated thread ("tick-&lt;hostname&gt;") started by {@link #start()}.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Loop-control state; guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();

    private final PrefixLogger logger;
    private DockerImage imageBeingDownloaded = null;  // non-null while an async image pull is in flight

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final AclMaintainer aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // NOTE(review): SimpleDateFormat is not thread-safe; this one is only touched inside
    // addDebugMessage(), which synchronizes on debugMessages — confirm no other callers.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Ring buffer of recent debug messages (capped at ~1000); guarded by itself.
    private final LinkedList<String> debugMessages = new LinkedList<>();

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;  // set in start(); restarts a named service inside the container
    private Future<?> currentFilebeatRestarter; // cancelled when the container is removed
    private boolean resumeScriptRun = false;    // ensures the resume command runs once per container start

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;   // last attributes published to node repo (dedup)
    private ContainerNodeSpec lastNodeSpec = null;     // last spec fetched from node repo
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();  // reset on each container start

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final AclMaintainer aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // The converge loop; exits when stop() flips 'terminated'.
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to freeze/unfreeze and returns whether the agent has already
     * reached the requested state (the transition happens asynchronously in tick()).
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Logs the message at debug level and appends it to the bounded debug-history buffer. */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Snapshot of the agent's internal state plus recent debug messages, for introspection. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // NOTE(review): NPEs if called before the first converge sets lastNodeSpec.
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the converge loop thread and wires up the in-container service restarter. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat scheduler, terminates the converge loop and
     * blocks until both are fully stopped. Throws if called twice.
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Loop to survive spurious interrupts until both are truly down.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional node resume command once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /**
     * Reports the node's current attributes to the node repository. Docker image and Vespa
     * version are blanked out when the container is absent.
     */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Publishes attributes only when they differ from the last published set. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /**
     * Starts the container and (re)initializes per-container state: CPU metrics baseline,
     * daily filebeat restart, metrics/filebeat config, and the resume-script flag.
     */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();
        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);
        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required; if it is kept, restarts its services when the
     * restart generation has been bumped. Returns the container if it still exists.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns a reason to restart services if wanted restart generation is ahead of current. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Suspends via Orchestrator, then restarts Vespa inside a running, active container. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /**
     * Returns the reason the container should be removed, or empty if it should be kept:
     * wrong node state, wanted image change, not running, or resource allocation mismatch.
     */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when warranted: suspends active nodes first, best-effort stops
     * services, cancels the filebeat restarter, and marks the state ABSENT.
     * Returns empty if removed, otherwise the still-existing container.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    stopServices();
                } catch (Exception e) {
                    // Best effort - removal proceeds even if services fail to stop cleanly.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) {
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the converge loop so it runs immediately instead of waiting out the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the converge loop: waits until the converge interval has elapsed or
     * work is signalled, applies any pending freeze-state change, then converges unless frozen.
     * Exception policy: Orchestrator denials are informational; Docker errors reset
     * containerState to UNKNOWN; other exceptions are counted and ignored; any Throwable
     * takes the process down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge.minus(Duration.between(lastConverge, clock.instant())).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;
            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }
        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                containerState = UNKNOWN;
                logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                // Fail fast on errors we cannot reason about (OOM, linkage errors, ...).
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Reconciles actual container state with the wanted state from the node repository,
     * dispatching on the node's state (active nodes get the full start/resume path;
     * dirty nodes are cleaned and returned to the allocation pool; etc.).
     */
    void converge() {
        final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
                .orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
        Optional<Container> container = getContainer();
        if (!nodeSpec.equals(lastNodeSpec)) {
            addDebugMessage("Loading new node spec: " + nodeSpec.toString());
            lastNodeSpec = nodeSpec;
            // Spec changed; refresh metrics config for a live container.
            if (container.isPresent()) {
                storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
            }
        }
        switch (nodeSpec.nodeState) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                // Trigger cleanup when disk utilization reaches 80% of the allocation.
                storageMaintainer.getDiskUsageFor(containerName)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));
                scheduleDownLoadIfNeeded(nodeSpec);
                if (isDownloadingImage()) {
                    addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                if (!container.isPresent()) {
                    storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                    startContainer(nodeSpec);
                }
                runLocalResumeScriptIfNeeded();
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                logger.info("Call resume against Orchestrator");
                orchestrator.resume(hostname);
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case provisioned:
                nodeRepository.markAsDirty(hostname);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
                storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                nodeRepository.markNodeAvailableForNewAllocation(hostname);
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
        }
    }

    /** Serializes the metrics and delivers them to the in-container metrics RPC endpoint. */
    @SuppressWarnings("unchecked")
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /**
     * Asks the Docker daemon for the container, short-circuiting when the state is ABSENT
     * (see ContainerState); updates the state to ABSENT when the daemon reports no container.
     */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Tracks cumulative CPU counters from Docker stats and exposes usage ratios over the
     * window between the last two updateCpuDeltas() calls.
     * Note: in this variant the kernel ratio is relative to the container's own CPU time,
     * while the usage ratio is relative to total system CPU time — the two are not on the
     * same basis.
     */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        /** Feeds new cumulative counters; deltas are computed against the previous call. */
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /** Kernel-mode share of the container's own CPU time; 0 when no container time elapsed. */
        double getCpuKernelUsageRatio() {
            return deltaContainerUsage == 0 ? 0 : (double) deltaContainerKernelUsage / deltaContainerUsage;
        }

        /** Container's share of total system CPU time; 0 when no system time elapsed. */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? 0 : (double) deltaContainerUsage / deltaSystemUsage;
        }
    }

    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
/**
 * Agent responsible for a single node's Docker container: runs a converge loop that
 * reconciles the container's actual state with the wanted state from the node repository
 * (start/remove/restart the container, report attributes back, orchestrate suspend/resume).
 * The loop runs on a dedicated thread ("tick-&lt;hostname&gt;") started by {@link #start()}.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Loop-control state; guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();

    private final PrefixLogger logger;
    private DockerImage imageBeingDownloaded = null;  // non-null while an async image pull is in flight

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final AclMaintainer aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // NOTE(review): SimpleDateFormat is not thread-safe; this one is only touched inside
    // addDebugMessage(), which synchronizes on debugMessages — confirm no other callers.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Ring buffer of recent debug messages (capped at ~1000); guarded by itself.
    private final LinkedList<String> debugMessages = new LinkedList<>();

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;  // set in start(); restarts a named service inside the container
    private Future<?> currentFilebeatRestarter; // cancelled when the container is removed
    private boolean resumeScriptRun = false;    // ensures the resume command runs once per container start

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;   // last attributes published to node repo (dedup)
    private ContainerNodeSpec lastNodeSpec = null;     // last spec fetched from node repo
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();  // reset on each container start

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final AclMaintainer aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // The converge loop; exits when stop() flips 'terminated'.
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to freeze/unfreeze and returns whether the agent has already
     * reached the requested state (the transition happens asynchronously in tick()).
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Logs the message at debug level and appends it to the bounded debug-history buffer. */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Snapshot of the agent's internal state plus recent debug messages, for introspection. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // NOTE(review): NPEs if called before the first converge sets lastNodeSpec.
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the converge loop thread and wires up the in-container service restarter. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat scheduler, terminates the converge loop and
     * blocks until both are fully stopped. Throws if called twice.
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Loop to survive spurious interrupts until both are truly down.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional node resume command once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /**
     * Reports the node's current attributes to the node repository. Docker image and Vespa
     * version are blanked out when the container is absent.
     */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Publishes attributes only when they differ from the last published set. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /**
     * Starts the container and (re)initializes per-container state: CPU metrics baseline,
     * daily filebeat restart, metrics/filebeat config, and the resume-script flag.
     */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();
        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);
        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required; if it is kept, restarts its services when the
     * restart generation has been bumped. Returns the container if it still exists.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns a reason to restart services if wanted restart generation is ahead of current. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Suspends via Orchestrator, then restarts Vespa inside a running, active container. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /**
     * Returns the reason the container should be removed, or empty if it should be kept:
     * wrong node state, wanted image change, not running, or resource allocation mismatch.
     */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when warranted: suspends active nodes first, best-effort stops
     * services, cancels the filebeat restarter, and marks the state ABSENT.
     * Returns empty if removed, otherwise the still-existing container.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    stopServices();
                } catch (Exception e) {
                    // Best effort - removal proceeds even if services fail to stop cleanly.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) {
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the converge loop so it runs immediately instead of waiting out the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the converge loop: waits until the converge interval has elapsed or
     * work is signalled, applies any pending freeze-state change, then converges unless frozen.
     * Exception policy: Orchestrator denials are informational; Docker errors reset
     * containerState to UNKNOWN; other exceptions are counted and ignored; any Throwable
     * takes the process down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge.minus(Duration.between(lastConverge, clock.instant())).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;
            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }
        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                containerState = UNKNOWN;
                logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                // Fail fast on errors we cannot reason about (OOM, linkage errors, ...).
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Reconciles actual container state with the wanted state from the node repository,
     * dispatching on the node's state (active nodes get the full start/resume path;
     * dirty nodes are cleaned and returned to the allocation pool; etc.).
     */
    void converge() {
        final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
                .orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
        Optional<Container> container = getContainer();
        if (!nodeSpec.equals(lastNodeSpec)) {
            addDebugMessage("Loading new node spec: " + nodeSpec.toString());
            lastNodeSpec = nodeSpec;
            // Spec changed; refresh metrics config for a live container.
            if (container.isPresent()) {
                storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
            }
        }
        switch (nodeSpec.nodeState) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                // Trigger cleanup when disk utilization reaches 80% of the allocation.
                storageMaintainer.getDiskUsageFor(containerName)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));
                scheduleDownLoadIfNeeded(nodeSpec);
                if (isDownloadingImage()) {
                    addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                if (!container.isPresent()) {
                    storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                    startContainer(nodeSpec);
                }
                runLocalResumeScriptIfNeeded();
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                logger.info("Call resume against Orchestrator");
                orchestrator.resume(hostname);
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case provisioned:
                nodeRepository.markAsDirty(hostname);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
                storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                nodeRepository.markNodeAvailableForNewAllocation(hostname);
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
        }
    }

    /** Serializes the metrics and delivers them to the in-container metrics RPC endpoint. */
    @SuppressWarnings("unchecked")
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /**
     * Asks the Docker daemon for the container, short-circuiting when the state is ABSENT
     * (see ContainerState); updates the state to ABSENT when the daemon reports no container.
     */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Tracks cumulative CPU counters from Docker stats and exposes usage ratios over the
     * window between the last two updateCpuDeltas() calls. In this variant both ratios are
     * relative to total system CPU time, so they share the same basis.
     */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        /** Feeds new cumulative counters; deltas are computed against the previous call. */
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? 0 : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Container kernel-mode CPU time as a share of total system CPU time over the same window. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? 0 : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
I would have expected: `double cpuKernelUsageRatioOfAllocated = totalNumCpuCores * lastCpuMetric.getCpuKernelUsageRatio() / nodeSpec.minCpuCores;` — i.e. the kernel ratio scaled from host-relative to allocation-relative exactly like the total-usage ratio. Why multiply by cpuUsageRatioOfAllocated (which is itself derived from cpuUsageRatioOfHost)? That applies the host-to-allocation scaling twice.
public void updateContainerNodeMetrics() { final ContainerNodeSpec nodeSpec = lastNodeSpec; if (nodeSpec == null || containerState == ABSENT) return; Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", hostname) .add("role", "tenants") .add("state", nodeSpec.nodeState.toString()) .add("parentHostname", environment.getParentHostHostname()); Dimensions dimensions = dimensionsBuilder.build(); Docker.ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); double cpuUsageRatioOfHost = lastCpuMetric.getCpuUsageRatio(); double cpuUsageRatioOfAllocated = totalNumCpuCores * cpuUsageRatioOfHost / nodeSpec.minCpuCores; double cpuKernelUsageRatioOfAllocated = cpuUsageRatioOfAllocated * lastCpuMetric.getCpuKernelUsageRatio(); long 
memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); }
double cpuKernelUsageRatioOfAllocated = cpuUsageRatioOfAllocated * lastCpuMetric.getCpuKernelUsageRatio();
public void updateContainerNodeMetrics() { final ContainerNodeSpec nodeSpec = lastNodeSpec; if (nodeSpec == null || containerState == ABSENT) return; Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", hostname) .add("role", "tenants") .add("state", nodeSpec.nodeState.toString()) .add("parentHostname", environment.getParentHostHostname()); Dimensions dimensions = dimensionsBuilder.build(); Docker.ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long 
memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private final Object monitor = new Object(); private final PrefixLogger logger; private DockerImage imageBeingDownloaded = null; private final ContainerName containerName; private final String hostname; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final AclMaintainer aclMaintainer; private final Environment environment; private final Clock clock; private final Duration timeBetweenEachConverge; private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private final LinkedList<String> debugMessages = new LinkedList<>(); private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Future<?> currentFilebeatRestarter; private boolean resumeScriptRun = false; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeAttributes lastAttributesSet = null; private ContainerNodeSpec lastNodeSpec = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final String hostName, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final AclMaintainer aclMaintainer, final Environment environment, final Clock clock, final Duration timeBetweenEachConverge) { this.containerName = ContainerName.fromHostname(hostName); this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName); this.hostname = hostName; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.aclMaintainer = aclMaintainer; this.environment = environment; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.loopThread = new Thread(() -> { while (!terminated.get()) tick(); }); this.loopThread.setName("tick-" + hostname); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; addDebugMessage(wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } private void addDebugMessage(String message) { synchronized (debugMessages) { while (debugMessages.size() > 1000) { debugMessages.pop(); } logger.debug(message); debugMessages.add("[" + sdf.format(new Date()) + "] " + message); } } @Override public Map<String, Object> debugInfo() { Map<String, Object> debug = new LinkedHashMap<>(); debug.put("Hostname", hostname); debug.put("isFrozen", isFrozen); debug.put("wantFrozen", wantFrozen); debug.put("terminated", terminated); debug.put("workToDoNow", workToDoNow); synchronized (debugMessages) { debug.put("History", new LinkedList<>(debugMessages)); } debug.put("Node repo state", lastNodeSpec.nodeState.name()); return debug; } @Override public void start() { String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"; logger.info(message); addDebugMessage(message); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( containerName, "service", service, "restart"); if (!processResult.isSuccess()) { logger.error("Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { logger.error("Failed to restart service " + service, e); } }; } @Override public void stop() { addDebugMessage("Stopping"); filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); logger.info("Stopped"); } private void runLocalResumeScriptIfNeeded() { if (! 
resumeScriptRun) { addDebugMessage("Starting optional node program resume command"); dockerOperations.resumeNode(containerName); resumeScriptRun = true; } } private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) { final NodeAttributes nodeAttributes = new NodeAttributes() .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null)) .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L)) .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage(""))) .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse("")); publishStateToNodeRepoIfChanged(nodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) { if (!currentAttributes.equals(lastAttributesSet)) { logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes); addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}"); nodeRepository.updateNodeAttributes(hostname, currentAttributes); lastAttributesSet = currentAttributes; } } private void startContainer(ContainerNodeSpec nodeSpec) { aclMaintainer.run(); dockerOperations.startContainer(containerName, nodeSpec); lastCpuMetric = new CpuUsageReporter(); currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS); storageMaintainer.writeMetricsConfig(containerName, nodeSpec); storageMaintainer.writeFilebeatConfig(containerName, nodeSpec); resumeScriptRun = false; containerState = UNKNOWN; logger.info("Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(nodeSpec, container)) 
.map(container -> { shouldRestartServices(nodeSpec).ifPresent(restartReason -> { logger.info("Will restart services for container " + container + ": " + restartReason); restartServices(nodeSpec, container); }); return container; }); } private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) { if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty(); if (!nodeSpec.currentRestartGeneration.isPresent() || nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get()); } return Optional.empty(); } private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) { if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) { ContainerName containerName = existingContainer.name; logger.info("Restarting services for " + containerName); orchestratorSuspendNode(); dockerOperations.restartVespaOnNode(containerName); } } @Override public void stopServices() { logger.info("Stopping services for " + containerName); dockerOperations.trySuspendNode(containerName); dockerOperations.stopServicesOnNode(containerName); } private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) { final Node.State nodeState = nodeSpec.nodeState; if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer + " -> " + nodeSpec.wantedDockerImage.get()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources 
wantedContainerResources = ContainerResources.from( nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer); if (removeReason.isPresent()) { logger.info("Will remove container " + existingContainer + ": " + removeReason.get()); if (existingContainer.state.isRunning()) { if (nodeSpec.nodeState == Node.State.active) { orchestratorSuspendNode(); } try { stopServices(); } catch (Exception e) { logger.info("Failed stopping services, ignoring", e); } } if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true); dockerOperations.removeContainer(existingContainer); containerState = ABSENT; logger.info("Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) { if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return; if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) { imageBeingDownloaded = nodeSpec.wantedDockerImage.get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; addDebugMessage("Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge.minus(Duration.between(lastConverge, clock.instant())).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); 
} catch (InterruptedException e) { logger.error("Interrupted, but ignoring this: " + hostname); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } if (isFrozenCopy) { addDebugMessage("tick: isFrozen"); } else { try { converge(); } catch (OrchestratorException e) { logger.info(e.getMessage()); addDebugMessage(e.getMessage()); } catch (DockerException e) { numberOfUnhandledException++; containerState = UNKNOWN; logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e); } catch (Exception e) { numberOfUnhandledException++; logger.error("Unhandled exception, ignoring.", e); addDebugMessage(e.getMessage()); } catch (Throwable t) { logger.error("Unhandled throwable, taking down system.", t); System.exit(234); } } } void converge() { final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname) .orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); 
scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } } @SuppressWarnings("unchecked") private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { logger.warning("Unable to push metrics to container: " + containerName, e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = 
dockerOperations.getContainer(containerName); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public String getHostname() { return hostname; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = totalSystemUsage - this.totalSystemUsage; deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } double getCpuKernelUsageRatio() { return deltaContainerUsage == 0 ? 0 : (double) deltaContainerKernelUsage / deltaContainerUsage; } double getCpuUsageRatio() { return deltaSystemUsage == 0 ? 0 : (double) deltaContainerUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { logger.info("Ask Orchestrator for permission to suspend node " + hostname); orchestrator.suspend(hostname); } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private final Object monitor = new Object(); private final PrefixLogger logger; private DockerImage imageBeingDownloaded = null; private final ContainerName containerName; private final String hostname; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final AclMaintainer aclMaintainer; private final Environment environment; private final Clock clock; private final Duration timeBetweenEachConverge; private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private final LinkedList<String> debugMessages = new LinkedList<>(); private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Future<?> currentFilebeatRestarter; private boolean resumeScriptRun = false; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeAttributes lastAttributesSet = null; private ContainerNodeSpec lastNodeSpec = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final String hostName, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final AclMaintainer aclMaintainer, final Environment environment, final Clock clock, final Duration timeBetweenEachConverge) { this.containerName = ContainerName.fromHostname(hostName); this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName); this.hostname = hostName; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.aclMaintainer = aclMaintainer; this.environment = environment; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.loopThread = new Thread(() -> { while (!terminated.get()) tick(); }); this.loopThread.setName("tick-" + hostname); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; addDebugMessage(wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } private void addDebugMessage(String message) { synchronized (debugMessages) { while (debugMessages.size() > 1000) { debugMessages.pop(); } logger.debug(message); debugMessages.add("[" + sdf.format(new Date()) + "] " + message); } } @Override public Map<String, Object> debugInfo() { Map<String, Object> debug = new LinkedHashMap<>(); debug.put("Hostname", hostname); debug.put("isFrozen", isFrozen); debug.put("wantFrozen", wantFrozen); debug.put("terminated", terminated); debug.put("workToDoNow", workToDoNow); synchronized (debugMessages) { debug.put("History", new LinkedList<>(debugMessages)); } debug.put("Node repo state", lastNodeSpec.nodeState.name()); return debug; } @Override public void start() { String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"; logger.info(message); addDebugMessage(message); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( containerName, "service", service, "restart"); if (!processResult.isSuccess()) { logger.error("Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { logger.error("Failed to restart service " + service, e); } }; } @Override public void stop() { addDebugMessage("Stopping"); filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); logger.info("Stopped"); } private void runLocalResumeScriptIfNeeded() { if (! 
resumeScriptRun) { addDebugMessage("Starting optional node program resume command"); dockerOperations.resumeNode(containerName); resumeScriptRun = true; } } private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) { final NodeAttributes nodeAttributes = new NodeAttributes() .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null)) .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L)) .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage(""))) .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse("")); publishStateToNodeRepoIfChanged(nodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) { if (!currentAttributes.equals(lastAttributesSet)) { logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes); addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}"); nodeRepository.updateNodeAttributes(hostname, currentAttributes); lastAttributesSet = currentAttributes; } } private void startContainer(ContainerNodeSpec nodeSpec) { aclMaintainer.run(); dockerOperations.startContainer(containerName, nodeSpec); lastCpuMetric = new CpuUsageReporter(); currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS); storageMaintainer.writeMetricsConfig(containerName, nodeSpec); storageMaintainer.writeFilebeatConfig(containerName, nodeSpec); resumeScriptRun = false; containerState = UNKNOWN; logger.info("Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(nodeSpec, container)) 
.map(container -> { shouldRestartServices(nodeSpec).ifPresent(restartReason -> { logger.info("Will restart services for container " + container + ": " + restartReason); restartServices(nodeSpec, container); }); return container; }); } private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) { if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty(); if (!nodeSpec.currentRestartGeneration.isPresent() || nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get()); } return Optional.empty(); } private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) { if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) { ContainerName containerName = existingContainer.name; logger.info("Restarting services for " + containerName); orchestratorSuspendNode(); dockerOperations.restartVespaOnNode(containerName); } } @Override public void stopServices() { logger.info("Stopping services for " + containerName); dockerOperations.trySuspendNode(containerName); dockerOperations.stopServicesOnNode(containerName); } private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) { final Node.State nodeState = nodeSpec.nodeState; if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer + " -> " + nodeSpec.wantedDockerImage.get()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources 
wantedContainerResources = ContainerResources.from( nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer); if (removeReason.isPresent()) { logger.info("Will remove container " + existingContainer + ": " + removeReason.get()); if (existingContainer.state.isRunning()) { if (nodeSpec.nodeState == Node.State.active) { orchestratorSuspendNode(); } try { stopServices(); } catch (Exception e) { logger.info("Failed stopping services, ignoring", e); } } if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true); dockerOperations.removeContainer(existingContainer); containerState = ABSENT; logger.info("Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) { if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return; if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) { imageBeingDownloaded = nodeSpec.wantedDockerImage.get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; addDebugMessage("Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge.minus(Duration.between(lastConverge, clock.instant())).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); 
} catch (InterruptedException e) { logger.error("Interrupted, but ignoring this: " + hostname); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } if (isFrozenCopy) { addDebugMessage("tick: isFrozen"); } else { try { converge(); } catch (OrchestratorException e) { logger.info(e.getMessage()); addDebugMessage(e.getMessage()); } catch (DockerException e) { numberOfUnhandledException++; containerState = UNKNOWN; logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e); } catch (Exception e) { numberOfUnhandledException++; logger.error("Unhandled exception, ignoring.", e); addDebugMessage(e.getMessage()); } catch (Throwable t) { logger.error("Unhandled throwable, taking down system.", t); System.exit(234); } } } void converge() { final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname) .orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); 
scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } } @SuppressWarnings("unchecked") private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { logger.warning("Unable to push metrics to container: " + containerName, e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = 
dockerOperations.getContainer(containerName); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public String getHostname() { return hostname; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = totalSystemUsage - this.totalSystemUsage; deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? 0 : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? 0 : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { logger.info("Ask Orchestrator for permission to suspend node " + hostname); orchestrator.suspend(hostname); } }
Should we start with just logging here before enabling it?
private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if (!record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": " + dnsName + " -> " + rotation.name()); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": " + dnsName + " -> " + rotation.name()); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } }
nameService.updateRecord(record.get().id(), rotationName);
private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if (!record.get().data().equals(rotationName)) { log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all 
applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr"))) throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment"); try (Lock lock = lock(id)) { com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! 
tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (tenant.get().isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); if (tenant.get().isAthensTenant()) { ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()); try { zmsClient.deleteApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } catch (ZmsException ignored) { } zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } LockedApplication application = new LockedApplication(new Application(id), lock); store(application); log.info("Created " + application); return application; } } /** Deploys an application. If the application does not exist it is created. */ public ActivateResult deployApplication(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId).map(application1 -> new LockedApplication(application1, lock)).orElse(new LockedApplication( new Application(applicationId), lock) ); Version version; if (options.deployCurrentVersion) version = application.versionIn(zone, controller); else if (canDeployDirectlyTo(zone, options)) version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); else if ( ! application.deploying().isPresent() && ! 
zone.environment().isManuallyDeployed()) return unexpectedDeployment(applicationId, zone, applicationPackage); else version = application.deployVersionIn(zone, controller); Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone); ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); if ( ! options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null) application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value()); if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange) application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision))); if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get()); application = application.withJobTriggering(jobType.get(), application.deploying(), triggering.at(), version, Optional.of(revision), triggering.reason()); } application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if ( ! 
canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying())) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.deploying().get() + " is not tested"); Deployment existingDeployment = application.deployments().get(zone); if (existingDeployment != null && existingDeployment.version().isAfter(version)) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } Optional<Rotation> rotation; try (RotationLock rotationLock = rotationRepository.lock()) { rotation = getRotation(application, zone, rotationLock); if (rotation.isPresent()) { application = application.with(rotation.get().id()); store(application); registerRotationInDns(rotation.get(), application.rotation().get().dnsName()); } } Set<String> cnames = application.rotation() .map(ApplicationRotation::dnsName) .map(Collections::singleton) .orElseGet(Collections::emptySet); Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations = rotation .map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation( new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId( r.id().asString()), r.name())) .map(Collections::singleton) .orElseGet(Collections::emptySet); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, revision, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse()); } } private ActivateResult unexpectedDeployment(ApplicationId applicationId, 
Zone zone, ApplicationPackage applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<Zone> zone = job.zone(controller.system()); if ( ! 
job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(Zone::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. */ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> screwDriverBuildJob) { if ( ! 
screwDriverBuildJob.isPresent()) return ApplicationRevision.from(applicationPackage.hash()); GitRevision gitRevision = screwDriverBuildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationRevision.from(applicationPackage.hash()); return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ /** Get an available rotation, if deploying to a production zone and a service ID is specified */ private Optional<Rotation> getRotation(Application application, Zone zone, RotationLock lock) { if (zone.environment() != Environment.prod || !application.deploymentSpec().globalServiceId().isPresent()) { return Optional.empty(); } return Optional.of(rotationRepository.getRotation(application, lock)); } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the application with this id * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the application does not exist */ public void deleteApplication(ApplicationId id, Optional<NToken> token) { if ( ! 
controller.applications().get(id).isPresent()) throw new NotExistsException("Could not delete application '" + id + "': Application not found"); lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (tenant.isAthensTenant()) zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); db.deleteApplication(id); log.info("Deleted " + application); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, Zone zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application application, Zone zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! 
DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, Zone zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(Zone zone, DeployOptions options) { return ! options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ public void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! 
controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent()) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
/**
 * Controller-side registry of hosted applications: creation, deletion, deployment,
 * global-rotation management and job-completion notification.
 *
 * All mutations of an application go through a per-application curator lock
 * (see {@link #lock(ApplicationId)}): acquire, read, modify, store, release.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    // NOTE(review): nameService is not referenced by any method visible in this view;
    // presumably used by registerRotationInDns (whose definition is not visible here) — confirm.
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store every application at startup (under its lock), e.g. to migrate
        // persisted data to the current serialization format.
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or null if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // NOTE(review): replaceAll takes a regex, so each '.' here matches any
                // character — this relies on the suffix only ever appearing literally. Confirm,
                // or consider a literal replace.
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // Answer as soon as one host is seen with both a global and a non-global
                    // endpoint: the non-global one carries the canonical service name.
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr")))
            throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment");
        try (Lock lock = lock(id)) {
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
            if ( ! tenant.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Dashes are normalized to underscores in some identifier contexts, so the two
            // spellings would collide; reject the second one.
            if (get(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            if (tenant.get().isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            if (tenant.get().isAthensTenant()) {
                ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
                try {
                    // Delete any stale ZMS registration first so the add below starts clean;
                    // failure to delete (e.g. nothing registered) is deliberately ignored.
                    zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
                            new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
                } catch (ZmsException ignored) {
                }
                zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                        new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            LockedApplication application = new LockedApplication(new Application(id), lock);
            store(application);
            log.info("Created " + application);
            return application;
        }
    }

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, Zone zone,
                                            ApplicationPackage applicationPackage, DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(application1 -> new LockedApplication(application1, lock))
                    .orElse(new LockedApplication(new Application(applicationId), lock));

            // Choose the Vespa version to deploy at:
            Version version;
            if (options.deployCurrentVersion)
                version = application.versionIn(zone, controller); // redeploy at the version already there
            else if (canDeployDirectlyTo(zone, options))
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed())
                return unexpectedDeployment(applicationId, zone, applicationPackage); // no change in progress: refuse softly
            else
                version = application.deployVersionIn(zone, controller);

            Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone);
            ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);

            if ( ! options.deployCurrentVersion) {
                // Add the application to the repository if not present, and update its metadata
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null)
                    application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value());
                // If an application (i.e. revision) change is in progress, pin it to the revision
                // actually being deployed now. NOTE(review): this silently rewrites the tracked
                // change when a deployment also changes the revision — see discussion about
                // removing ApplicationChange altogether.
                if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange)
                    application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision)));
                if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) {
                    // Record the triggering for this job; create a synthetic one if an external
                    // entity triggered the deployment without going through the trigger.
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get());
                    application = application.withJobTriggering(jobType.get(), application.deploying(),
                                                                triggering.at(), version, Optional.of(revision),
                                                                triggering.reason());
                }
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // persist metadata changes before attempting the deployment
            }

            // Validate the change being deployed, unless this is a direct (manual) deployment
            if ( ! canDeployDirectlyTo(zone, options)) {
                if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.deploying().get() + " is not tested");
                Deployment existingDeployment = application.deployments().get(zone);
                if (existingDeployment != null && existingDeployment.version().isAfter(version))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
            }

            // Assign a global rotation if one is needed and available
            Optional<Rotation> rotation;
            try (RotationLock rotationLock = rotationRepository.lock()) {
                rotation = getRotation(application, zone, rotationLock);
                if (rotation.isPresent()) {
                    application = application.with(rotation.get().id());
                    store(application); // store assignment before registering DNS, so it is never lost
                    registerRotationInDns(rotation.get(), application.rotation().get().dnsName());
                }
            }

            Set<String> cnames = application.rotation()
                                            .map(ApplicationRotation::dnsName)
                                            .map(Collections::singleton)
                                            .orElseGet(Collections::emptySet);
            Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations = rotation
                    .map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation(
                            new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId(
                                    r.id().asString()), r.name()))
                    .map(Collections::singleton)
                    .orElseGet(Collections::emptySet);

            // Carry out the deployment against the config server
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, revision, version, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse());
        }
    }

    /** Builds a warning-only ActivateResult for a deployment request that was not expected (and is ignored). */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, Zone zone,
                                                ApplicationPackage applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse);
    }

    /**
     * Deactivates production deployments which are no longer in the deployment spec,
     * provided the deploymentRemoval validation override allows it.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes production job status entries whose zone is no longer in the deployment spec. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<Zone> zone = job.zone(controller.system());
            // Keep non-production jobs, and production jobs whose zone is still declared
            if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(Zone::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version,
                                                   DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** Creates a placeholder job run (id -1, no revision, no reason) for externally triggered jobs. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant());
    }

    /** Returns a copy of the given options with the Vespa version set to the given version. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /**
     * Derives the application revision from the package hash, attaching source (git) information
     * when a complete Screwdriver git revision is available.
     */
    private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                             Optional<ScrewdriverBuildJob> screwDriverBuildJob) {
        if ( ! screwDriverBuildJob.isPresent())
            return ApplicationRevision.from(applicationPackage.hash());
        GitRevision gitRevision = screwDriverBuildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationRevision.from(applicationPackage.hash());
        return ApplicationRevision.from(applicationPackage.hash(),
                                        new SourceRevision(gitRevision.repository.id(),
                                                           gitRevision.branch.id(),
                                                           gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation */
    // NOTE(review): the definition of registerRotationInDns (called from deployApplication)
    // is not visible in this view of the file — only its javadoc remains here.

    /** Get an available rotation, if deploying to a production zone and a service ID is specified */
    private Optional<Rotation> getRotation(Application application, Zone zone, RotationLock lock) {
        if (zone.environment() != Environment.prod || !application.deploymentSpec().globalServiceId().isPresent()) {
            return Optional.empty();
        }
        return Optional.of(rotationRepository.getRotation(application, lock));
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            // Best effort: callers can cope with missing endpoint information
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " +
                                   Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the application with this id
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the application does not exist
     */
    public void deleteApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! controller.applications().get(id).isPresent())
            throw new NotExistsException("Could not delete application '" + id + "': Application not found");
        lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            // Also unregister the application from ZMS for Athens tenants
            if (tenant.isAthensTenant())
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                                .deleteApplication(tenant.getAthensDomain().get(),
                                                   new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            db.deleteApplication(id);
            log.info("Deleted " + application);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards a build-job completion report to the deployment trigger; unknown applications are ignored. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, Zone zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    private void deactivate(Application application, Zone zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        // When expiry is required, a still-live deployment is left untouched
        if (requireThatDeploymentHasExpired && deployment.isPresent() &&
            ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;
        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, Zone zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // already gone from the config server; still remove it from our records below
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with every '-' in the application name replaced by '_' (collision detection). */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(Zone zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    public void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                      .filter(zone -> zone.environment() == Environment.prod)
                      .forEach(zone -> {
                          if ( ! controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent())
                              throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                      });
    }

    public RotationRepository rotationRepository() { return rotationRepository; }

}
I took a shortcut: `getCpuKernelUsageRatio()` returned the ratio of time spent in kernel mode relative to _non-idle CPU time_. I see now that this is confusing, so I have rearranged the terms so that both ratios are calculated in the same way.
/**
 * Collects container stats from the Docker daemon and pushes CPU, memory, disk and
 * network metrics (dimensioned by host/role/state/parent) to the container's metric
 * receiver. Silently returns when no node spec is known yet, when the container is
 * known to be absent, or when stats are unavailable.
 */
public void updateContainerNodeMetrics() {
    final ContainerNodeSpec nodeSpec = lastNodeSpec;
    if (nodeSpec == null || containerState == ABSENT) return;

    Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
    if (!containerStats.isPresent()) return;

    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", hostname)
            .add("role", "tenants")
            .add("state", nodeSpec.nodeState.toString())
            .add("parentHostname", environment.getParentHostHostname());
    Dimensions dimensions = dimensionsBuilder.build();

    Docker.ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Raw counters from the Docker stats API (untyped maps, hence the casts).
    // Number of cores on the host is inferred from the per-cpu usage list.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

    // Ratios are computed from the delta since the previous sample.
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    double cpuUsageRatioOfHost = lastCpuMetric.getCpuUsageRatio();
    // Scale host usage to the cores allocated to this node: ratio * (host cores / allocated cores)
    double cpuUsageRatioOfAllocated = totalNumCpuCores * cpuUsageRatioOfHost / nodeSpec.minCpuCores;
    // NOTE(review): getCpuKernelUsageRatio() here presumably returns kernel time as a fraction
    // of the container's own (non-idle) CPU time — TODO confirm in CpuUsageReporter. Under that
    // assumption this product yields kernel time as a fraction of the allocated CPU, i.e. the
    // same quantity as cpuUsageRatioOfAllocated but restricted to kernel mode.
    double cpuKernelUsageRatioOfAllocated = cpuUsageRatioOfAllocated * lastCpuMetric.getCpuKernelUsageRatio();

    // Cache pages are reclaimable, so they are not counted as "used" memory.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

    List<DimensionMetrics> metrics = new ArrayList<>();
    // *.util metrics are percentages (ratio * 100)
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());

    // One metric set per network interface, with the interface name as an extra dimension
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });

    pushMetricsToContainer(metrics);
}
double cpuKernelUsageRatioOfAllocated = cpuUsageRatioOfAllocated * lastCpuMetric.getCpuKernelUsageRatio();
/**
 * Collects container stats from the Docker daemon and pushes CPU, memory, disk and
 * network metrics (dimensioned by host/role/state/parent) to the container's metric
 * receiver. Silently returns when no node spec is known yet, when the container is
 * known to be absent, or when stats are unavailable.
 */
public void updateContainerNodeMetrics() {
    final ContainerNodeSpec nodeSpec = lastNodeSpec;
    if (nodeSpec == null || containerState == ABSENT) return;

    Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
    if (!containerStats.isPresent()) return;

    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", hostname)
            .add("role", "tenants")
            .add("state", nodeSpec.nodeState.toString())
            .add("parentHostname", environment.getParentHostHostname());
    Dimensions dimensions = dimensionsBuilder.build();

    Docker.ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // Raw counters from the Docker stats API (untyped maps, hence the casts).
    // Number of cores on the host is inferred from the per-cpu usage list.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

    // Ratios are computed from the delta since the previous sample.
    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
    // Fraction of the host's CPU that is allocated to this node; dividing a host-relative
    // usage ratio by it gives the usage relative to the node's allocation. Both total and
    // kernel-mode ratios are scaled the same way. NOTE(review): assumes both
    // getCpuUsageRatio() and getCpuKernelUsageRatio() are relative to total host CPU time —
    // TODO confirm in CpuUsageReporter.
    final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

    // Cache pages are reclaimable, so they are not counted as "used" memory.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

    List<DimensionMetrics> metrics = new ArrayList<>();
    // *.util metrics are percentages (ratio * 100)
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());

    // One metric set per network interface, with the interface name as an extra dimension
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });

    pushMetricsToContainer(metrics);
}
/**
 * Agent for a single node: runs a converge loop in its own thread that reconciles the local
 * Docker container with the wanted state fetched from the node repository, and reports
 * attributes and debug information back.
 *
 * NOTE(review): the frozen/work flags are guarded by 'monitor'; the remaining mutable fields
 * (containerState, lastNodeSpec, lastAttributesSet, ...) appear to be touched only from the
 * tick thread — confirm no other thread calls converge().
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Frozen state: wantFrozen is the requested value, isFrozen the applied one; guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();

    private final PrefixLogger logger;

    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final AclMaintainer aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // NOTE(review): SimpleDateFormat is not thread-safe; it is only used inside
    // addDebugMessage(), which synchronizes on debugMessages, so this is OK as written.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    private final LinkedList<String> debugMessages = new LinkedList<>();
    private int numberOfUnhandledException = 0;

    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;

    // Whether the optional resume program has run since the container was last started.
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final AclMaintainer aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();

        // The converge loop thread; runs until stop() flips 'terminated'. Not started here — see start().
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /** Requests freeze/unfreeze and returns whether the applied state already matches the request. */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }

            return isFrozen == frozen;
        }
    }

    /** Appends a timestamped message to the bounded in-memory debug log (capped around 1000 entries). */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }

            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Returns a snapshot of the agent's internal state for inspection. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // NOTE(review): lastNodeSpec starts out null, so this NPEs if called before the first converge.
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the converge loop thread and wires up the service restarter used by the filebeat schedule. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();

        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");

                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat restarter and terminates the loop thread,
     * blocking until both have finished. Throws if the agent was already stopped.
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional node resume program, at most once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /** Reports restart/reboot generation, docker image and vespa version back to the node repo. */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                // Image and version are cleared when the container is known to be absent.
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));

        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Publishes attributes to the node repo only when they differ from the last published set. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /** Applies ACLs, starts the container, and (re)initializes per-container state and config files. */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();

        // Daily filebeat restart schedule, plus fresh metrics/filebeat configs for the new container.
        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /** Removes the container if needed; for a surviving container, restarts services when requested. */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns a restart reason when the wanted restart generation is ahead of the current one. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
        // NOTE(review): when currentRestartGeneration is absent, the .get() in the message
        // below throws NoSuchElementException — confirm the spec guarantees presence here.
        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Restarts Vespa services inside a running container of an active node, suspending via Orchestrator first. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Suspends the node (best effort) and stops all services in the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /** Returns a human-readable reason to remove the container, or empty if it should keep running. */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when shouldRemoveContainer() gives a reason, stopping services and
     * suspending via Orchestrator first when the node is active; flips containerState to ABSENT.
     * Returns empty if removed, otherwise the still-existing container.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }

                try {
                    stopServices();
                } catch (Exception e) {
                    // Best effort: removal proceeds even if stopping services fails.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Starts an async image pull when the wanted image differs from the current one; tracks pull progress. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;

        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) {
            // Pull finished since last tick; clear the in-progress marker.
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick loop so it converges immediately instead of waiting out the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: waits until the converge interval has elapsed (or work is
     * signaled), applies any pending freeze request, then converges unless frozen.
     * All exception handling for converge() lives here; only a Throwable takes the process down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge.minus(Duration.between(lastConverge, clock.instant())).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                // Expected when suspend permission is denied; just log and retry next tick.
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                // Docker state may be stale after a daemon error; force a re-query next tick.
                containerState = UNKNOWN;
                logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /** Fetches the wanted state from the node repo and reconciles the local container with it. */
    void converge() {
        final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
                .orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));

        Optional<Container> container = getContainer();
        if (!nodeSpec.equals(lastNodeSpec)) {
            addDebugMessage("Loading new node spec: " + nodeSpec.toString());
            lastNodeSpec = nodeSpec;
            if (container.isPresent()) {
                storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
            }
        }

        switch (nodeSpec.nodeState) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                // Clean up old files when disk usage reaches 80% of the allocation.
                storageMaintainer.getDiskUsageFor(containerName)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));

                scheduleDownLoadIfNeeded(nodeSpec);
                if (isDownloadingImage()) {
                    // Wait for the image pull to finish; converge again on a later tick.
                    addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                if (! container.isPresent()) {
                    storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                    startContainer(nodeSpec);
                }

                runLocalResumeScriptIfNeeded();
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                logger.info("Call resume against Orchestrator");
                orchestrator.resume(hostname);
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case provisioned:
                nodeRepository.markAsDirty(hostname);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
                storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                nodeRepository.markNodeAvailableForNewAllocation(hostname);
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
        }
    }

    /** Serializes the metrics and pushes them into the container's RPC endpoint on port 19091. */
    @SuppressWarnings("unchecked")
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /** Queries the docker daemon for the container; flips containerState to ABSENT when it is gone. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks CPU usage deltas between consecutive docker-stats samples. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        /** Records a new sample and computes the deltas since the previous one. */
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        // NOTE(review): this divides kernel time by the *container's* total CPU time, while
        // getCpuUsageRatio() divides by the whole system's CPU time — the "cpu.sys.util"
        // metric built from this therefore uses a different denominator than "cpu.util".
        // Confirm which denominator is intended.
        double getCpuKernelUsageRatio() {
            return deltaContainerUsage == 0 ? 0 : (double) deltaContainerKernelUsage / deltaContainerUsage;
        }

        /** Container CPU time divided by total system CPU time over the last sampling interval. */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? 0 : (double) deltaContainerUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node. */
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
/**
 * Agent for a single node: runs a converge loop in its own thread that reconciles the local
 * Docker container with the wanted state fetched from the node repository, and reports
 * attributes and debug information back.
 *
 * NOTE(review): the frozen/work flags are guarded by 'monitor'; the remaining mutable fields
 * (containerState, lastNodeSpec, lastAttributesSet, ...) appear to be touched only from the
 * tick thread — confirm no other thread calls converge().
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Frozen state: wantFrozen is the requested value, isFrozen the applied one; guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();

    private final PrefixLogger logger;

    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final AclMaintainer aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // NOTE(review): SimpleDateFormat is not thread-safe; it is only used inside
    // addDebugMessage(), which synchronizes on debugMessages, so this is OK as written.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    private final LinkedList<String> debugMessages = new LinkedList<>();
    private int numberOfUnhandledException = 0;

    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;

    // Whether the optional resume program has run since the container was last started.
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final AclMaintainer aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();

        // The converge loop thread; runs until stop() flips 'terminated'. Not started here — see start().
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /** Requests freeze/unfreeze and returns whether the applied state already matches the request. */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }

            return isFrozen == frozen;
        }
    }

    /** Appends a timestamped message to the bounded in-memory debug log (capped around 1000 entries). */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }

            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Returns a snapshot of the agent's internal state for inspection. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // NOTE(review): lastNodeSpec starts out null, so this NPEs if called before the first converge.
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the converge loop thread and wires up the service restarter used by the filebeat schedule. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();

        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");

                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat restarter and terminates the loop thread,
     * blocking until both have finished. Throws if the agent was already stopped.
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional node resume program, at most once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /** Reports restart/reboot generation, docker image and vespa version back to the node repo. */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                // Image and version are cleared when the container is known to be absent.
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));

        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Publishes attributes to the node repo only when they differ from the last published set. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /** Applies ACLs, starts the container, and (re)initializes per-container state and config files. */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();

        // Daily filebeat restart schedule, plus fresh metrics/filebeat configs for the new container.
        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /** Removes the container if needed; for a surviving container, restarts services when requested. */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns a restart reason when the wanted restart generation is ahead of the current one. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
        // NOTE(review): when currentRestartGeneration is absent, the .get() in the message
        // below throws NoSuchElementException — confirm the spec guarantees presence here.
        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Restarts Vespa services inside a running container of an active node, suspending via Orchestrator first. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Suspends the node (best effort) and stops all services in the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /** Returns a human-readable reason to remove the container, or empty if it should keep running. */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when shouldRemoveContainer() gives a reason, stopping services and
     * suspending via Orchestrator first when the node is active; flips containerState to ABSENT.
     * Returns empty if removed, otherwise the still-existing container.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }

                try {
                    stopServices();
                } catch (Exception e) {
                    // Best effort: removal proceeds even if stopping services fails.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Starts an async image pull when the wanted image differs from the current one; tracks pull progress. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;

        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) {
            // Pull finished since last tick; clear the in-progress marker.
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick loop so it converges immediately instead of waiting out the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: waits until the converge interval has elapsed (or work is
     * signaled), applies any pending freeze request, then converges unless frozen.
     * All exception handling for converge() lives here; only a Throwable takes the process down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge.minus(Duration.between(lastConverge, clock.instant())).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                // Expected when suspend permission is denied; just log and retry next tick.
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                // Docker state may be stale after a daemon error; force a re-query next tick.
                containerState = UNKNOWN;
                logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /** Fetches the wanted state from the node repo and reconciles the local container with it. */
    void converge() {
        final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
                .orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));

        Optional<Container> container = getContainer();
        if (!nodeSpec.equals(lastNodeSpec)) {
            addDebugMessage("Loading new node spec: " + nodeSpec.toString());
            lastNodeSpec = nodeSpec;
            if (container.isPresent()) {
                storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
            }
        }

        switch (nodeSpec.nodeState) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                // Clean up old files when disk usage reaches 80% of the allocation.
                storageMaintainer.getDiskUsageFor(containerName)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));

                scheduleDownLoadIfNeeded(nodeSpec);
                if (isDownloadingImage()) {
                    // Wait for the image pull to finish; converge again on a later tick.
                    addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                if (! container.isPresent()) {
                    storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                    startContainer(nodeSpec);
                }

                runLocalResumeScriptIfNeeded();
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                logger.info("Call resume against Orchestrator");
                orchestrator.resume(hostname);
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                break;
            case provisioned:
                nodeRepository.markAsDirty(hostname);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(nodeSpec, container);
                logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
                storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
                updateNodeRepoWithCurrentAttributes(nodeSpec);
                nodeRepository.markNodeAvailableForNewAllocation(hostname);
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
        }
    }

    /** Serializes the metrics and pushes them into the container's RPC endpoint on port 19091. */
    @SuppressWarnings("unchecked")
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /** Queries the docker daemon for the container; flips containerState to ABSENT when it is gone. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks CPU usage deltas between consecutive docker-stats samples. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        /** Records a new sample and computes the deltas since the previous one. */
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? 0 : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as getCpuUsageRatio(), but counting only the container's kernel-mode CPU time. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? 0 : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node. */
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
This is fine because SpecVerifier appends its own port and protocol when they are not set (see https://github.com/vespa-engine/vespa/blob/master/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/verification/commons/HostURLGenerator.java).
public String getHardwareDivergence() { String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.spec.SpecVerifier", configServers); }
.map(URI::getHost)
public String getHardwareDivergence() { String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.spec.SpecVerifier", configServers); }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final Docker docker; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(Docker docker, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.docker = docker; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); } public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/"); Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa")); SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all") .withTag("parentHostname", environment.getParentHostHostname()); Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life")); SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath) .withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", nodeSpec.nodeFlavor) .withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor) .withTag("state", nodeSpec.nodeState.toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); 
nodeSpec.owner.ifPresent(owner -> hostLifeSchedule .withTag("tenantName", owner.tenant) .withTag("app", owner.application + "." + owner.instance) .withTag("applicationName", owner.application) .withTag("instanceName", owner.instance) .withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)); nodeSpec.membership.ifPresent(membership -> hostLifeSchedule .withTag("clustertype", membership.clusterType) .withTag("clusterid", membership.clusterId)); nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version)); try { vespaSchedule.writeTo(yamasAgentFolder); hostLifeSchedule.writeTo(yamasAgentFolder); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; docker.executeInContainerAsRoot(containerName, restartYamasAgent); } catch (IOException e) { throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e); } } public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." 
+ nodeSpec.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml"); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + nodeSpec, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/"); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! 
getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { String[] pathsToClean = { getDefaults().underVespaHome("logs/elasticsearch2"), getDefaults().underVespaHome("logs/logstash2"), getDefaults().underVespaHome("logs/daemontools_y"), getDefaults().underVespaHome("logs/nginx"), getDefaults().underVespaHome("logs/vespa") }; for (String pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", 
Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, boolean force) { if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, ContainerNodeSpec nodeSpec) { Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", nodeSpec.hostname); attributes.put("parent_hostname", HostName.getLocalhost()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", nodeSpec.nodeFlavor); attributes.put("kernel_version", System.getProperty("os.version")); nodeSpec.currentDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString())); nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version)); nodeSpec.owner.ifPresent(owner -> { attributes.put("tenant", owner.tenant); attributes.put("application", owner.application); attributes.put("instance", owner.instance); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ 
public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); } /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. 
* Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, ContainerNodeSpec nodeSpec) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/")) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @throws RuntimeException if exit code != 0 */ private String executeMaintainer(String mainClass, String... 
args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final Docker docker; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(Docker docker, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.docker = docker; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); } public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/"); Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa")); SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all") .withTag("parentHostname", environment.getParentHostHostname()); Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life")); SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath) .withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", nodeSpec.nodeFlavor) .withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor) .withTag("state", nodeSpec.nodeState.toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); 
nodeSpec.owner.ifPresent(owner -> hostLifeSchedule .withTag("tenantName", owner.tenant) .withTag("app", owner.application + "." + owner.instance) .withTag("applicationName", owner.application) .withTag("instanceName", owner.instance) .withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)); nodeSpec.membership.ifPresent(membership -> hostLifeSchedule .withTag("clustertype", membership.clusterType) .withTag("clusterid", membership.clusterId)); nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version)); try { vespaSchedule.writeTo(yamasAgentFolder); hostLifeSchedule.writeTo(yamasAgentFolder); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; docker.executeInContainerAsRoot(containerName, restartYamasAgent); } catch (IOException e) { throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e); } } public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." 
+ nodeSpec.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml"); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + nodeSpec, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/"); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! 
getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { String[] pathsToClean = { getDefaults().underVespaHome("logs/elasticsearch2"), getDefaults().underVespaHome("logs/logstash2"), getDefaults().underVespaHome("logs/daemontools_y"), getDefaults().underVespaHome("logs/nginx"), getDefaults().underVespaHome("logs/vespa") }; for (String pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", 
Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, boolean force) { if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, ContainerNodeSpec nodeSpec) { Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", nodeSpec.hostname); attributes.put("parent_hostname", HostName.getLocalhost()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", nodeSpec.nodeFlavor); attributes.put("kernel_version", System.getProperty("os.version")); nodeSpec.currentDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString())); nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version)); nodeSpec.owner.ifPresent(owner -> { attributes.put("tenant", owner.tenant); attributes.put("application", owner.application); attributes.put("instance", owner.instance); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ 
public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); } /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. 
* Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, ContainerNodeSpec nodeSpec) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/")) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @throws RuntimeException if exit code != 0 */ private String executeMaintainer(String mainClass, String... 
args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
I like this is being done ASAP when reading from the environment.
private static List<URI> getConfigServerUrlsFromEnvironment() { String scheme = getEnvironmentVariable(ENV_CONFIGSERVER_SCHEME); String configServerHosts = getEnvironmentVariable(ENV_CONFIGSERVER_HOSTS); String port = getEnvironmentVariable(ENV_CONFIGSERVER_PORT); return Arrays.stream(configServerHosts.split("[,\\s]+")) .map(hostname -> URI.create(scheme + ": .collect(Collectors.toList()); }
return Arrays.stream(configServerHosts.split("[,\\s]+"))
private static List<URI> getConfigServerUrlsFromEnvironment() { String scheme = getEnvironmentVariable(ENV_CONFIGSERVER_SCHEME); String configServerHosts = getEnvironmentVariable(ENV_CONFIGSERVER_HOSTS); String port = getEnvironmentVariable(ENV_CONFIGSERVER_PORT); return Arrays.stream(configServerHosts.split("[,\\s]+")) .map(hostname -> URI.create(scheme + ": .collect(Collectors.toList()); }
class Environment { private static final DateFormat filenameFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS"); public static final String APPLICATION_STORAGE_CLEANUP_PATH_PREFIX = "cleanup_"; private static final String ENV_CONFIGSERVER_SCHEME = "CONFIG_SERVER_SCHEME"; private static final String ENV_CONFIGSERVER_HOSTS = "CONFIG_SERVER_ADDRESS"; private static final String ENV_CONFIGSERVER_PORT = "CONFIG_SERVER_PORT"; private static final String ENVIRONMENT = "ENVIRONMENT"; private static final String REGION = "REGION"; private static final String LOGSTASH_NODES = "LOGSTASH_NODES"; private static final String COREDUMP_FEED_ENDPOINT = "COREDUMP_FEED_ENDPOINT"; private final List<URI> configServerHosts; private final String environment; private final String region; private final String parentHostHostname; private final InetAddressResolver inetAddressResolver; private final PathResolver pathResolver; private final List<String> logstashNodes; private final String feedEndpoint; static { filenameFormatter.setTimeZone(TimeZone.getTimeZone("UTC")); } public Environment() { this(getConfigServerUrlsFromEnvironment(), getEnvironmentVariable(ENVIRONMENT), getEnvironmentVariable(REGION), HostName.getLocalhost(), new InetAddressResolver(), new PathResolver(), getLogstashNodesFromEnvironment(), getEnvironmentVariable(COREDUMP_FEED_ENDPOINT)); } public Environment(List<URI> configServerHosts, String environment, String region, String parentHostHostname, InetAddressResolver inetAddressResolver, PathResolver pathResolver, List<String> logstashNodes, String feedEndpoint) { this.configServerHosts = configServerHosts; this.environment = environment; this.region = region; this.parentHostHostname = parentHostHostname; this.inetAddressResolver = inetAddressResolver; this.pathResolver = pathResolver; this.logstashNodes = logstashNodes; this.feedEndpoint = feedEndpoint; } public List<URI> getConfigServerUris() { return configServerHosts; } public String getEnvironment() { return 
environment; } public String getRegion() { return region; } public String getParentHostHostname() { return parentHostHostname; } private static String getEnvironmentVariable(String name) { final String value = System.getenv(name); if (Strings.isNullOrEmpty(value)) { throw new IllegalStateException(String.format("Environment variable %s not set", name)); } return value; } public String getZone() { return getEnvironment() + "." + getRegion(); } private static List<String> getLogstashNodesFromEnvironment() { String logstashNodes = System.getenv(LOGSTASH_NODES); if(Strings.isNullOrEmpty(logstashNodes)) { return Collections.emptyList(); } return Arrays.asList(logstashNodes.split("[,\\s]+")); } public InetAddress getInetAddressForHost(String hostname) throws UnknownHostException { return inetAddressResolver.getInetAddressForHost(hostname); } public PathResolver getPathResolver() { return pathResolver; } public String getCoredumpFeedEndpoint() { return feedEndpoint; } /** * Absolute path in node admin to directory with processed and reported core dumps */ public Path pathInNodeAdminToDoneCoredumps() { return pathResolver.getApplicationStoragePathForNodeAdmin().resolve("processed-coredumps"); } /** * Absolute path in node admin container to the node cleanup directory. */ public Path pathInNodeAdminToNodeCleanup(ContainerName containerName) { return pathResolver.getApplicationStoragePathForNodeAdmin() .resolve(APPLICATION_STORAGE_CLEANUP_PATH_PREFIX + containerName.asString() + "_" + filenameFormatter.format(Date.from(Instant.now()))); } /** * Translates an absolute path in node agent container to an absolute path in node admin container. 
* @param containerName name of the node agent container * @param absolutePathInNode absolute path in that container * @return the absolute path in node admin container pointing at the same inode */ public Path pathInNodeAdminFromPathInNode(ContainerName containerName, String absolutePathInNode) { Path pathInNode = Paths.get(absolutePathInNode); if (! pathInNode.isAbsolute()) { throw new IllegalArgumentException("The specified path in node was not absolute: " + absolutePathInNode); } return pathResolver.getApplicationStoragePathForNodeAdmin() .resolve(containerName.asString()) .resolve(PathResolver.ROOT.relativize(pathInNode)); } /** * Translates an absolute path in node agent container to an absolute path in host. * @param containerName name of the node agent container * @param absolutePathInNode absolute path in that container * @return the absolute path in host pointing at the same inode */ public Path pathInHostFromPathInNode(ContainerName containerName, String absolutePathInNode) { Path pathInNode = Paths.get(absolutePathInNode); if (! pathInNode.isAbsolute()) { throw new IllegalArgumentException("The specified path in node was not absolute: " + absolutePathInNode); } return pathResolver.getApplicationStoragePathForHost() .resolve(containerName.asString()) .resolve(PathResolver.ROOT.relativize(pathInNode)); } public List<String> getLogstashNodes() { return logstashNodes; } public static class Builder { private List<URI> configServerHosts = Collections.emptyList(); private String environment; private String region; private String parentHostHostname; private InetAddressResolver inetAddressResolver; private PathResolver pathResolver; private List<String> logstashNodes = Collections.emptyList(); private String feedEndpoint; public Builder configServerUris(String... 
hosts) { configServerHosts = Arrays.stream(hosts) .map(URI::create) .collect(Collectors.toList()); return this; } public Builder environment(String environment) { this.environment = environment; return this; } public Builder region(String region) { this.region = region; return this; } public Builder parentHostHostname(String parentHostHostname) { this.parentHostHostname = parentHostHostname; return this; } public Builder inetAddressResolver(InetAddressResolver inetAddressResolver) { this.inetAddressResolver = inetAddressResolver; return this; } public Builder pathResolver(PathResolver pathResolver) { this.pathResolver = pathResolver; return this; } public Builder logstashNodes(List<String> hosts) { this.logstashNodes = hosts; return this; } public Builder feedEndpoint(String feedEndpoint) { this.feedEndpoint = feedEndpoint; return this; } public Environment build() { return new Environment(configServerHosts, environment, region, parentHostHostname, inetAddressResolver, pathResolver, logstashNodes, feedEndpoint); } } }
class Environment { private static final DateFormat filenameFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS"); public static final String APPLICATION_STORAGE_CLEANUP_PATH_PREFIX = "cleanup_"; private static final String ENV_CONFIGSERVER_SCHEME = "CONFIG_SERVER_SCHEME"; private static final String ENV_CONFIGSERVER_HOSTS = "CONFIG_SERVER_ADDRESS"; private static final String ENV_CONFIGSERVER_PORT = "CONFIG_SERVER_PORT"; private static final String ENVIRONMENT = "ENVIRONMENT"; private static final String REGION = "REGION"; private static final String LOGSTASH_NODES = "LOGSTASH_NODES"; private static final String COREDUMP_FEED_ENDPOINT = "COREDUMP_FEED_ENDPOINT"; private final List<URI> configServerHosts; private final String environment; private final String region; private final String parentHostHostname; private final InetAddressResolver inetAddressResolver; private final PathResolver pathResolver; private final List<String> logstashNodes; private final String feedEndpoint; static { filenameFormatter.setTimeZone(TimeZone.getTimeZone("UTC")); } public Environment() { this(getConfigServerUrlsFromEnvironment(), getEnvironmentVariable(ENVIRONMENT), getEnvironmentVariable(REGION), HostName.getLocalhost(), new InetAddressResolver(), new PathResolver(), getLogstashNodesFromEnvironment(), getEnvironmentVariable(COREDUMP_FEED_ENDPOINT)); } public Environment(List<URI> configServerHosts, String environment, String region, String parentHostHostname, InetAddressResolver inetAddressResolver, PathResolver pathResolver, List<String> logstashNodes, String feedEndpoint) { this.configServerHosts = configServerHosts; this.environment = environment; this.region = region; this.parentHostHostname = parentHostHostname; this.inetAddressResolver = inetAddressResolver; this.pathResolver = pathResolver; this.logstashNodes = logstashNodes; this.feedEndpoint = feedEndpoint; } public List<URI> getConfigServerUris() { return configServerHosts; } public String getEnvironment() { return 
environment; } public String getRegion() { return region; } public String getParentHostHostname() { return parentHostHostname; } private static String getEnvironmentVariable(String name) { final String value = System.getenv(name); if (Strings.isNullOrEmpty(value)) { throw new IllegalStateException(String.format("Environment variable %s not set", name)); } return value; } public String getZone() { return getEnvironment() + "." + getRegion(); } private static List<String> getLogstashNodesFromEnvironment() { String logstashNodes = System.getenv(LOGSTASH_NODES); if(Strings.isNullOrEmpty(logstashNodes)) { return Collections.emptyList(); } return Arrays.asList(logstashNodes.split("[,\\s]+")); } public InetAddress getInetAddressForHost(String hostname) throws UnknownHostException { return inetAddressResolver.getInetAddressForHost(hostname); } public PathResolver getPathResolver() { return pathResolver; } public String getCoredumpFeedEndpoint() { return feedEndpoint; } /** * Absolute path in node admin to directory with processed and reported core dumps */ public Path pathInNodeAdminToDoneCoredumps() { return pathResolver.getApplicationStoragePathForNodeAdmin().resolve("processed-coredumps"); } /** * Absolute path in node admin container to the node cleanup directory. */ public Path pathInNodeAdminToNodeCleanup(ContainerName containerName) { return pathResolver.getApplicationStoragePathForNodeAdmin() .resolve(APPLICATION_STORAGE_CLEANUP_PATH_PREFIX + containerName.asString() + "_" + filenameFormatter.format(Date.from(Instant.now()))); } /** * Translates an absolute path in node agent container to an absolute path in node admin container. 
* @param containerName name of the node agent container * @param absolutePathInNode absolute path in that container * @return the absolute path in node admin container pointing at the same inode */ public Path pathInNodeAdminFromPathInNode(ContainerName containerName, String absolutePathInNode) { Path pathInNode = Paths.get(absolutePathInNode); if (! pathInNode.isAbsolute()) { throw new IllegalArgumentException("The specified path in node was not absolute: " + absolutePathInNode); } return pathResolver.getApplicationStoragePathForNodeAdmin() .resolve(containerName.asString()) .resolve(PathResolver.ROOT.relativize(pathInNode)); } /** * Translates an absolute path in node agent container to an absolute path in host. * @param containerName name of the node agent container * @param absolutePathInNode absolute path in that container * @return the absolute path in host pointing at the same inode */ public Path pathInHostFromPathInNode(ContainerName containerName, String absolutePathInNode) { Path pathInNode = Paths.get(absolutePathInNode); if (! pathInNode.isAbsolute()) { throw new IllegalArgumentException("The specified path in node was not absolute: " + absolutePathInNode); } return pathResolver.getApplicationStoragePathForHost() .resolve(containerName.asString()) .resolve(PathResolver.ROOT.relativize(pathInNode)); } public List<String> getLogstashNodes() { return logstashNodes; } public static class Builder { private List<URI> configServerHosts = Collections.emptyList(); private String environment; private String region; private String parentHostHostname; private InetAddressResolver inetAddressResolver; private PathResolver pathResolver; private List<String> logstashNodes = Collections.emptyList(); private String feedEndpoint; public Builder configServerUris(String... 
hosts) { configServerHosts = Arrays.stream(hosts) .map(URI::create) .collect(Collectors.toList()); return this; } public Builder environment(String environment) { this.environment = environment; return this; } public Builder region(String region) { this.region = region; return this; } public Builder parentHostHostname(String parentHostHostname) { this.parentHostHostname = parentHostHostname; return this; } public Builder inetAddressResolver(InetAddressResolver inetAddressResolver) { this.inetAddressResolver = inetAddressResolver; return this; } public Builder pathResolver(PathResolver pathResolver) { this.pathResolver = pathResolver; return this; } public Builder logstashNodes(List<String> hosts) { this.logstashNodes = hosts; return this; } public Builder feedEndpoint(String feedEndpoint) { this.feedEndpoint = feedEndpoint; return this; } public Environment build() { return new Environment(configServerHosts, environment, region, parentHostHostname, inetAddressResolver, pathResolver, logstashNodes, feedEndpoint); } } }
Shouldn't the scheme strings ("http", "https") be enum constants, or at least named constants, instead of repeated string literals?
/**
 * Builds the socket-factory registry for the HTTP client: plain sockets for
 * "http" and TLS sockets for "https".
 *
 * NOTE(review): the TLS factory trusts EVERY certificate (the trust strategy
 * unconditionally returns true) and disables hostname verification via
 * NoopHostnameVerifier. That is only defensible for traffic to internal config
 * servers — do not reuse this registry for untrusted endpoints.
 *
 * @throws RuntimeException if the permissive SSL context cannot be built
 */
private static Registry<ConnectionSocketFactory> getConnectionSocketFactoryRegistry() {
    try {
        SSLConnectionSocketFactory trustEverythingTlsFactory = new SSLConnectionSocketFactory(
                new SSLContextBuilder().loadTrustMaterial(null, (chain, authType) -> true).build(),
                NoopHostnameVerifier.INSTANCE);

        return RegistryBuilder.<ConnectionSocketFactory>create()
                .register("http", PlainConnectionSocketFactory.getSocketFactory())
                .register("https", trustEverythingTlsFactory)
                .build();
    } catch (GeneralSecurityException e) {
        throw new RuntimeException("Failed to create SSL context", e);
    }
}
.register("https", sslSocketFactory)
/**
 * Builds the socket-factory registry for the HTTP client: plain sockets for
 * "http" and TLS sockets for "https".
 *
 * NOTE(review): the TLS factory trusts EVERY certificate (the trust strategy
 * unconditionally returns true) and disables hostname verification via
 * NoopHostnameVerifier. That is only defensible for traffic to internal config
 * servers — do not reuse this registry for untrusted endpoints.
 *
 * @throws RuntimeException if the permissive SSL context cannot be built
 */
private static Registry<ConnectionSocketFactory> getConnectionSocketFactoryRegistry() {
    try {
        SSLConnectionSocketFactory trustEverythingTlsFactory = new SSLConnectionSocketFactory(
                new SSLContextBuilder().loadTrustMaterial(null, (chain, authType) -> true).build(),
                NoopHostnameVerifier.INSTANCE);

        return RegistryBuilder.<ConnectionSocketFactory>create()
                .register("http", PlainConnectionSocketFactory.getSocketFactory())
                .register("https", trustEverythingTlsFactory)
                .build();
    } catch (GeneralSecurityException e) {
        throw new RuntimeException("Failed to create SSL context", e);
    }
}
/**
 * Executes HTTP requests (GET/PUT/POST/PATCH/DELETE) against a set of config
 * servers. Every request is attempted against each server in a randomized but
 * then fixed order, and the whole list is retried up to MAX_LOOPS times before
 * giving up. Response bodies are JSON and are deserialized into the
 * caller-supplied return type.
 */
class ConfigServerHttpRequestExecutor {
    private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(ConfigServerHttpRequestExecutor.class);
    private static final int MAX_LOOPS = 2;

    private final ObjectMapper mapper = new ObjectMapper();
    private final CloseableHttpClient client;
    private final List<URI> configServerHosts;

    @Override
    public void finalize() throws Throwable {
        // Best-effort cleanup of the pooled client; close failures are logged and ignored.
        // NOTE(review): finalize() is deprecated in newer JDKs; consider AutoCloseable instead.
        try {
            client.close();
        } catch (Exception e) {
            NODE_ADMIN_LOGGER.warning("Ignoring exception thrown when closing client against " + configServerHosts, e);
        }
        super.finalize();
    }

    /** Creates an executor over the given servers with a pooled client (at most 200 connections). */
    public static ConfigServerHttpRequestExecutor create(Collection<URI> configServerUris) {
        PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(getConnectionSocketFactoryRegistry());
        cm.setMaxTotal(200);
        return new ConfigServerHttpRequestExecutor(
                randomizeConfigServerUris(configServerUris),
                HttpClientBuilder.create()
                        .disableAutomaticRetries()
                        .setUserAgent("node-admin")
                        .setConnectionManager(cm)
                        .build());
    }

    ConfigServerHttpRequestExecutor(List<URI> configServerHosts, CloseableHttpClient client) {
        this.configServerHosts = configServerHosts;
        this.client = client;
    }

    /** Factory for the concrete request to send to one particular config server. */
    public interface CreateRequest {
        HttpUriRequest createRequest(URI configServerUri) throws JsonProcessingException, UnsupportedEncodingException;
    }

    /**
     * Tries the request against every config server until one answers with a
     * non-retryable status; parses the response body as JSON.
     *
     * @throws RuntimeException if all servers fail, wrapping the last failure,
     *         or if a successful response body cannot be parsed
     */
    private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) {
        Exception lastException = null;
        for (int loopRetry = 0; loopRetry < MAX_LOOPS; loopRetry++) {
            for (URI configServer : configServerHosts) {
                final CloseableHttpResponse response;
                try {
                    response = client.execute(requestFactory.createRequest(configServer));
                } catch (Exception e) {
                    // FIX: e.getMessage() may be null (common for some socket exceptions),
                    // so the old unconditional indexOf() call could itself throw NPE.
                    // Also use contains() so a match at index 0 is not missed.
                    String message = e.getMessage();
                    if (message != null && message.contains("(Connection refused)")) {
                        NODE_ADMIN_LOGGER.info("Connection refused to " + configServer + " (upgrading?), will try next");
                    } else {
                        NODE_ADMIN_LOGGER.warning("Failed to communicate with " + configServer + ", will try next: " + message);
                    }
                    lastException = e;
                    continue;
                }

                try {
                    Optional<HttpException> retryableException = HttpException.handleStatusCode(
                            response.getStatusLine().getStatusCode(), "Config server " + configServer);
                    if (retryableException.isPresent()) {
                        lastException = retryableException.get();
                        continue;
                    }

                    try {
                        return mapper.readValue(response.getEntity().getContent(), wantedReturnType);
                    } catch (IOException e) {
                        throw new RuntimeException("Response didn't contain nodes element, failed parsing?", e);
                    }
                } finally {
                    // Always release the connection back to the pool.
                    try {
                        response.close();
                    } catch (IOException e) {
                        NODE_ADMIN_LOGGER.warning("Ignoring exception from closing response", e);
                    }
                }
            }
        }

        throw new RuntimeException("All requests against the config servers ("
                + configServerHosts + ") failed, last as follows:", lastException);
    }

    /** PUT with an optional JSON body. */
    public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> {
            HttpPut put = new HttpPut(configServer.resolve(path));
            setContentTypeToApplicationJson(put);
            if (bodyJsonPojo.isPresent()) {
                put.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo.get())));
            }
            return put;
        }, wantedReturnType);
    }

    /** PATCH with a mandatory JSON body. */
    public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> {
            HttpPatch patch = new HttpPatch(configServer.resolve(path));
            setContentTypeToApplicationJson(patch);
            patch.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo)));
            return patch;
        }, wantedReturnType);
    }

    public <T> T delete(String path, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> new HttpDelete(configServer.resolve(path)), wantedReturnType);
    }

    public <T> T get(String path, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> new HttpGet(configServer.resolve(path)), wantedReturnType);
    }

    /** POST with a mandatory JSON body. */
    public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> {
            HttpPost post = new HttpPost(configServer.resolve(path));
            setContentTypeToApplicationJson(post);
            post.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo)));
            return post;
        }, wantedReturnType);
    }

    private void setContentTypeToApplicationJson(HttpRequestBase request) {
        request.setHeader(HttpHeaders.CONTENT_TYPE, "application/json");
    }

    /** Shuffles once at construction so all requests walk the servers in the same order. */
    private static List<URI> randomizeConfigServerUris(Collection<URI> configServerUris) {
        List<URI> shuffledConfigServerHosts = new ArrayList<>(configServerUris);
        Collections.shuffle(shuffledConfigServerHosts);
        return shuffledConfigServerHosts;
    }
}
/**
 * Executes HTTP requests (GET/PUT/POST/PATCH/DELETE) against a set of config
 * servers. Every request is attempted against each server in a randomized but
 * then fixed order, and the whole list is retried up to MAX_LOOPS times before
 * giving up. Response bodies are JSON and are deserialized into the
 * caller-supplied return type.
 */
class ConfigServerHttpRequestExecutor {
    private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(ConfigServerHttpRequestExecutor.class);
    private static final int MAX_LOOPS = 2;

    private final ObjectMapper mapper = new ObjectMapper();
    private final CloseableHttpClient client;
    private final List<URI> configServerHosts;

    @Override
    public void finalize() throws Throwable {
        // Best-effort cleanup of the pooled client; close failures are logged and ignored.
        // NOTE(review): finalize() is deprecated in newer JDKs; consider AutoCloseable instead.
        try {
            client.close();
        } catch (Exception e) {
            NODE_ADMIN_LOGGER.warning("Ignoring exception thrown when closing client against " + configServerHosts, e);
        }
        super.finalize();
    }

    /** Creates an executor over the given servers with a pooled client (at most 200 connections). */
    public static ConfigServerHttpRequestExecutor create(Collection<URI> configServerUris) {
        PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(getConnectionSocketFactoryRegistry());
        cm.setMaxTotal(200);
        return new ConfigServerHttpRequestExecutor(
                randomizeConfigServerUris(configServerUris),
                HttpClientBuilder.create()
                        .disableAutomaticRetries()
                        .setUserAgent("node-admin")
                        .setConnectionManager(cm)
                        .build());
    }

    ConfigServerHttpRequestExecutor(List<URI> configServerHosts, CloseableHttpClient client) {
        this.configServerHosts = configServerHosts;
        this.client = client;
    }

    /** Factory for the concrete request to send to one particular config server. */
    public interface CreateRequest {
        HttpUriRequest createRequest(URI configServerUri) throws JsonProcessingException, UnsupportedEncodingException;
    }

    /**
     * Tries the request against every config server until one answers with a
     * non-retryable status; parses the response body as JSON.
     *
     * @throws RuntimeException if all servers fail, wrapping the last failure,
     *         or if a successful response body cannot be parsed
     */
    private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) {
        Exception lastException = null;
        for (int loopRetry = 0; loopRetry < MAX_LOOPS; loopRetry++) {
            for (URI configServer : configServerHosts) {
                final CloseableHttpResponse response;
                try {
                    response = client.execute(requestFactory.createRequest(configServer));
                } catch (Exception e) {
                    // FIX: e.getMessage() may be null (common for some socket exceptions),
                    // so the old unconditional indexOf() call could itself throw NPE.
                    // Also use contains() so a match at index 0 is not missed.
                    String message = e.getMessage();
                    if (message != null && message.contains("(Connection refused)")) {
                        NODE_ADMIN_LOGGER.info("Connection refused to " + configServer + " (upgrading?), will try next");
                    } else {
                        NODE_ADMIN_LOGGER.warning("Failed to communicate with " + configServer + ", will try next: " + message);
                    }
                    lastException = e;
                    continue;
                }

                try {
                    Optional<HttpException> retryableException = HttpException.handleStatusCode(
                            response.getStatusLine().getStatusCode(), "Config server " + configServer);
                    if (retryableException.isPresent()) {
                        lastException = retryableException.get();
                        continue;
                    }

                    try {
                        return mapper.readValue(response.getEntity().getContent(), wantedReturnType);
                    } catch (IOException e) {
                        throw new RuntimeException("Response didn't contain nodes element, failed parsing?", e);
                    }
                } finally {
                    // Always release the connection back to the pool.
                    try {
                        response.close();
                    } catch (IOException e) {
                        NODE_ADMIN_LOGGER.warning("Ignoring exception from closing response", e);
                    }
                }
            }
        }

        throw new RuntimeException("All requests against the config servers ("
                + configServerHosts + ") failed, last as follows:", lastException);
    }

    /** PUT with an optional JSON body. */
    public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> {
            HttpPut put = new HttpPut(configServer.resolve(path));
            setContentTypeToApplicationJson(put);
            if (bodyJsonPojo.isPresent()) {
                put.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo.get())));
            }
            return put;
        }, wantedReturnType);
    }

    /** PATCH with a mandatory JSON body. */
    public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> {
            HttpPatch patch = new HttpPatch(configServer.resolve(path));
            setContentTypeToApplicationJson(patch);
            patch.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo)));
            return patch;
        }, wantedReturnType);
    }

    public <T> T delete(String path, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> new HttpDelete(configServer.resolve(path)), wantedReturnType);
    }

    public <T> T get(String path, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> new HttpGet(configServer.resolve(path)), wantedReturnType);
    }

    /** POST with a mandatory JSON body. */
    public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) {
        return tryAllConfigServers(configServer -> {
            HttpPost post = new HttpPost(configServer.resolve(path));
            setContentTypeToApplicationJson(post);
            post.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo)));
            return post;
        }, wantedReturnType);
    }

    private void setContentTypeToApplicationJson(HttpRequestBase request) {
        request.setHeader(HttpHeaders.CONTENT_TYPE, "application/json");
    }

    /** Shuffles once at construction so all requests walk the servers in the same order. */
    private static List<URI> randomizeConfigServerUris(Collection<URI> configServerUris) {
        List<URI> shuffledConfigServerHosts = new ArrayList<>(configServerUris);
        Collections.shuffle(shuffledConfigServerHosts);
        return shuffledConfigServerHosts;
    }
}
Could this return null here? It looks like `curFile` is only non-null after a successful `hasNext()` call — please confirm that `next()` can never hand out null.
/**
 * Returns the next buffered file from the recursive listing.
 * hasNext() guarantees that curFile is non-null on the success path, so this
 * never returns null.
 *
 * @throws java.util.NoSuchElementException when no further entries exist under {@code f}
 */
public LocatedFileStatus next() throws IOException {
    if (!hasNext()) {
        throw new java.util.NoSuchElementException("No more entry in " + f);
    }
    // Hand out the buffered entry and clear the slot so hasNext() advances next time.
    LocatedFileStatus nextEntry = curFile;
    curFile = null;
    return nextEntry;
}
return result;
/**
 * Returns the next buffered file from the recursive listing.
 * hasNext() guarantees that curFile is non-null on the success path, so this
 * never returns null.
 *
 * @throws java.util.NoSuchElementException when no further entries exist under {@code f}
 */
public LocatedFileStatus next() throws IOException {
    if (!hasNext()) {
        throw new java.util.NoSuchElementException("No more entry in " + f);
    }
    // Hand out the buffered entry and clear the slot so hasNext() advances next time.
    LocatedFileStatus nextEntry = curFile;
    curFile = null;
    return nextEntry;
}
class HiveRemoteFileIO implements RemoteFileIO { private static final Logger LOG = LogManager.getLogger(HiveRemoteFileIO.class); private final Configuration configuration; private FileSystem fileSystem; private final Map<String, Long> blockHostToId = new ConcurrentHashMap<>(); private final Map<Long, String> idToBlockHost = new ConcurrentHashMap<>(); private long hostId = 0; private static final int UNKNOWN_STORAGE_ID = -1; public HiveRemoteFileIO(Configuration configuration) { this.configuration = configuration; } public Map<RemotePathKey, List<RemoteFileDesc>> getRemoteFiles(RemotePathKey pathKey) { ImmutableMap.Builder<RemotePathKey, List<RemoteFileDesc>> resultPartitions = ImmutableMap.builder(); String path = ObjectStorageUtils.formatObjectStoragePath(pathKey.getPath()); List<RemoteFileDesc> fileDescs = Lists.newArrayList(); try { URI uri = new Path(path).toUri(); FileSystem fileSystem; if (!FeConstants.runningUnitTest) { fileSystem = FileSystem.get(uri, configuration); } else { fileSystem = this.fileSystem; } RemoteIterator<LocatedFileStatus> blockIterator; if (!pathKey.isRecursive()) { blockIterator = fileSystem.listLocatedStatus(new Path(uri.getPath())); } else { blockIterator = listFilesRecursive(fileSystem, new Path(uri.getPath())); } while (blockIterator.hasNext()) { LocatedFileStatus locatedFileStatus = blockIterator.next(); if (!isValidDataFile(locatedFileStatus)) { continue; } String locateName = locatedFileStatus.getPath().toUri().getPath(); String fileName = PartitionUtil.getSuffixName(uri.getPath(), locateName); BlockLocation[] blockLocations = locatedFileStatus.getBlockLocations(); List<RemoteFileBlockDesc> fileBlockDescs = getRemoteFileBlockDesc(blockLocations); fileDescs.add(new RemoteFileDesc(fileName, "", locatedFileStatus.getLen(), locatedFileStatus.getModificationTime(), ImmutableList.copyOf(fileBlockDescs), ImmutableList.of())); } } catch (FileNotFoundException e) { LOG.warn("Hive remote file on path: {} not existed, ignore it", path, e); } 
catch (Exception e) { LOG.error("Failed to get hive remote file's metadata on path: {}", path, e); throw new StarRocksConnectorException("Failed to get hive remote file's metadata on path: %s. msg: %s", pathKey, e.getMessage()); } return resultPartitions.put(pathKey, fileDescs).build(); } private RemoteIterator<LocatedFileStatus> listFilesRecursive(FileSystem fileSystem, Path f) throws FileNotFoundException, IOException { return new RemoteIterator<LocatedFileStatus>() { private Stack<RemoteIterator<LocatedFileStatus>> itors = new Stack<>(); private RemoteIterator<LocatedFileStatus> curItor = fileSystem.listLocatedStatus(f); private LocatedFileStatus curFile; @Override public boolean hasNext() throws IOException { while (curFile == null) { if (curItor.hasNext()) { handleFileStat(curItor.next()); } else if (!itors.empty()) { curItor = itors.pop(); } else { return false; } } return true; } private void handleFileStat(LocatedFileStatus stat) throws IOException { if (stat.isFile()) { curFile = stat; } else if (isValidDirectory(stat)) { try { RemoteIterator<LocatedFileStatus> newDirItor = fileSystem.listLocatedStatus(stat.getPath()); itors.push(curItor); curItor = newDirItor; } catch (FileNotFoundException ignored) { LOG.debug("Directory {} deleted while attempting for recursive listing", stat.getPath()); } } } @Override }; } private boolean isValidDataFile(FileStatus fileStatus) { if (!fileStatus.isFile()) { return false; } String lcFileName = fileStatus.getPath().getName().toLowerCase(); return !(lcFileName.startsWith(".") || lcFileName.startsWith("_") || lcFileName.endsWith(".copying") || lcFileName.endsWith(".tmp")); } private boolean isValidDirectory(FileStatus fileStatus) { if (!fileStatus.isDirectory()) { return false; } String dirName = fileStatus.getPath().getName(); return !(dirName.startsWith(".")); } protected List<RemoteFileBlockDesc> getRemoteFileBlockDesc(BlockLocation[] blockLocations) throws IOException { List<RemoteFileBlockDesc> fileBlockDescs = 
Lists.newArrayList(); for (BlockLocation blockLocation : blockLocations) { fileBlockDescs.add(buildRemoteFileBlockDesc( blockLocation.getOffset(), blockLocation.getLength(), getReplicaHostIds(blockLocation.getNames())) ); } return fileBlockDescs; } public RemoteFileBlockDesc buildRemoteFileBlockDesc(long offset, long length, long[] replicaHostIds) { return new RemoteFileBlockDesc(offset, length, replicaHostIds, new long[] {UNKNOWN_STORAGE_ID}, this); } public long[] getReplicaHostIds(String[] hostNames) { long[] replicaHostIds = new long[hostNames.length]; for (int j = 0; j < hostNames.length; j++) { String name = hostNames[j]; replicaHostIds[j] = getHostId(name); } return replicaHostIds; } public long getHostId(String hostName) { return blockHostToId.computeIfAbsent(hostName, k -> { long newId = hostId++; idToBlockHost.put(newId, hostName); return newId; }); } public String getHdfsDataNodeIp(long hostId) { String hostPort = idToBlockHost.get(hostId); return hostPort.split(":")[0]; } @VisibleForTesting public void setFileSystem(FileSystem fs) { this.fileSystem = fs; } }
class HiveRemoteFileIO implements RemoteFileIO { private static final Logger LOG = LogManager.getLogger(HiveRemoteFileIO.class); private final Configuration configuration; private FileSystem fileSystem; private final Map<String, Long> blockHostToId = new ConcurrentHashMap<>(); private final Map<Long, String> idToBlockHost = new ConcurrentHashMap<>(); private long hostId = 0; private static final int UNKNOWN_STORAGE_ID = -1; public HiveRemoteFileIO(Configuration configuration) { this.configuration = configuration; } public Map<RemotePathKey, List<RemoteFileDesc>> getRemoteFiles(RemotePathKey pathKey) { ImmutableMap.Builder<RemotePathKey, List<RemoteFileDesc>> resultPartitions = ImmutableMap.builder(); String path = ObjectStorageUtils.formatObjectStoragePath(pathKey.getPath()); List<RemoteFileDesc> fileDescs = Lists.newArrayList(); try { URI uri = new Path(path).toUri(); FileSystem fileSystem; if (!FeConstants.runningUnitTest) { fileSystem = FileSystem.get(uri, configuration); } else { fileSystem = this.fileSystem; } RemoteIterator<LocatedFileStatus> blockIterator; if (!pathKey.isRecursive()) { blockIterator = fileSystem.listLocatedStatus(new Path(uri.getPath())); } else { blockIterator = listFilesRecursive(fileSystem, new Path(uri.getPath())); } while (blockIterator.hasNext()) { LocatedFileStatus locatedFileStatus = blockIterator.next(); if (!isValidDataFile(locatedFileStatus)) { continue; } String locateName = locatedFileStatus.getPath().toUri().getPath(); String fileName = PartitionUtil.getSuffixName(uri.getPath(), locateName); BlockLocation[] blockLocations = locatedFileStatus.getBlockLocations(); List<RemoteFileBlockDesc> fileBlockDescs = getRemoteFileBlockDesc(blockLocations); fileDescs.add(new RemoteFileDesc(fileName, "", locatedFileStatus.getLen(), locatedFileStatus.getModificationTime(), ImmutableList.copyOf(fileBlockDescs), ImmutableList.of())); } } catch (FileNotFoundException e) { LOG.warn("Hive remote file on path: {} not existed, ignore it", path, e); } 
catch (Exception e) { LOG.error("Failed to get hive remote file's metadata on path: {}", path, e); throw new StarRocksConnectorException("Failed to get hive remote file's metadata on path: %s. msg: %s", pathKey, e.getMessage()); } return resultPartitions.put(pathKey, fileDescs).build(); } private RemoteIterator<LocatedFileStatus> listFilesRecursive(FileSystem fileSystem, Path f) throws FileNotFoundException, IOException { return new RemoteIterator<LocatedFileStatus>() { private Stack<RemoteIterator<LocatedFileStatus>> itors = new Stack<>(); private RemoteIterator<LocatedFileStatus> curItor = fileSystem.listLocatedStatus(f); private LocatedFileStatus curFile; @Override public boolean hasNext() throws IOException { while (curFile == null) { if (curItor.hasNext()) { handleFileStat(curItor.next()); } else if (!itors.empty()) { curItor = itors.pop(); } else { return false; } } return true; } private void handleFileStat(LocatedFileStatus stat) throws IOException { if (stat.isFile()) { curFile = stat; } else if (isValidDirectory(stat)) { try { RemoteIterator<LocatedFileStatus> newDirItor = fileSystem.listLocatedStatus(stat.getPath()); itors.push(curItor); curItor = newDirItor; } catch (FileNotFoundException ignored) { LOG.debug("Directory {} deleted while attempting for recursive listing", stat.getPath()); } } } @Override }; } private boolean isValidDataFile(FileStatus fileStatus) { if (!fileStatus.isFile()) { return false; } String lcFileName = fileStatus.getPath().getName().toLowerCase(); return !(lcFileName.startsWith(".") || lcFileName.startsWith("_") || lcFileName.endsWith(".copying") || lcFileName.endsWith(".tmp")); } private boolean isValidDirectory(FileStatus fileStatus) { if (!fileStatus.isDirectory()) { return false; } String dirName = fileStatus.getPath().getName(); return !(dirName.startsWith(".")); } protected List<RemoteFileBlockDesc> getRemoteFileBlockDesc(BlockLocation[] blockLocations) throws IOException { List<RemoteFileBlockDesc> fileBlockDescs = 
Lists.newArrayList(); for (BlockLocation blockLocation : blockLocations) { fileBlockDescs.add(buildRemoteFileBlockDesc( blockLocation.getOffset(), blockLocation.getLength(), getReplicaHostIds(blockLocation.getNames())) ); } return fileBlockDescs; } public RemoteFileBlockDesc buildRemoteFileBlockDesc(long offset, long length, long[] replicaHostIds) { return new RemoteFileBlockDesc(offset, length, replicaHostIds, new long[] {UNKNOWN_STORAGE_ID}, this); } public long[] getReplicaHostIds(String[] hostNames) { long[] replicaHostIds = new long[hostNames.length]; for (int j = 0; j < hostNames.length; j++) { String name = hostNames[j]; replicaHostIds[j] = getHostId(name); } return replicaHostIds; } public long getHostId(String hostName) { return blockHostToId.computeIfAbsent(hostName, k -> { long newId = hostId++; idToBlockHost.put(newId, hostName); return newId; }); } public String getHdfsDataNodeIp(long hostId) { String hostPort = idToBlockHost.get(hostId); return hostPort.split(":")[0]; } @VisibleForTesting public void setFileSystem(FileSystem fs) { this.fileSystem = fs; } }
I think it will be fine. I have a test case in CD; I'll verify it on the next run.
/**
 * Ensures the DNS CNAME mapping dnsName -> rotation exists and is current:
 * creates the record when absent, rewrites it when it points elsewhere, and
 * leaves it untouched when already correct. Failures are logged as warnings
 * only, so DNS trouble never fails the surrounding deployment.
 */
private void registerRotationInDns(Rotation rotation, String dnsName) {
    try {
        RecordName recordName = RecordName.from(dnsName);
        RecordData targetData = RecordData.fqdn(rotation.name());
        Optional<Record> existing = nameService.findRecord(Record.Type.CNAME, recordName);
        if ( ! existing.isPresent()) {
            // No record yet: create the CNAME from scratch.
            RecordId id = nameService.createCname(recordName, targetData);
            log.info("Registered mapping with record ID " + id.asString() + ": " + dnsName + " -> " + rotation.name());
        } else if ( ! existing.get().data().equals(targetData)) {
            // Record exists but points at something else: rewrite it in place.
            nameService.updateRecord(existing.get().id(), targetData);
            log.info("Updated mapping for record ID " + existing.get().id().asString() + ": " + dnsName + " -> " + rotation.name());
        }
    } catch (RuntimeException e) {
        // Best effort: deployment proceeds even if DNS registration fails.
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
nameService.updateRecord(record.get().id(), rotationName);
// Registers the DNS CNAME mapping dnsName -> rotation, creating the record
// when it does not exist. Failures are logged as warnings only, so DNS trouble
// never fails the surrounding deployment.
//
// NOTE(review): when an existing record's data differs from the rotation, this
// variant logs "Updated mapping ..." but never calls nameService.updateRecord,
// so the record is NOT actually changed — the sibling variant of this method
// performs the update. Confirm the omission is intentional; otherwise the log
// line is misleading.
private void registerRotationInDns(Rotation rotation, String dnsName) {
    try {
        Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
        RecordData rotationName = RecordData.fqdn(rotation.name());
        if (record.isPresent()) {
            if (!record.get().data().equals(rotationName)) {
                // Only a log entry is emitted here; no update call is made.
                log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'");
            }
        } else {
            RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
            log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'");
        }
    } catch (RuntimeException e) {
        // Best effort: deployment proceeds even if DNS registration fails.
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/**
 * Controller-side registry of applications: creation, deployment, deactivation
 * and deletion, plus the bookkeeping (rotations, DNS, deployment jobs) that
 * goes with them. All mutations of an application go through a per-application
 * curator lock — see lock(), lockIfPresent() and lockOrThrow().
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store every application at startup — presumably to migrate stored
        // data to the current serialization format; confirm.
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or null if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // NOTE(review): replaceAll takes a regex, so the unescaped dots
                // match any character — works here but broader than intended.
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host that carries both a global and a non-global endpoint
                    // identifies the canonical service endpoint.
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr")))
            throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment");
        try (Lock lock = lock(id)) {
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
            if ( ! tenant.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Dashes and underscores collide in some identifier contexts, so
            // reject names differing only in that respect.
            if (get(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            if (tenant.get().isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            if (tenant.get().isAthensTenant()) {
                ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
                // Delete-then-add: clears any stale registration before registering.
                try {
                    zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
                            new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
                } catch (ZmsException ignored) {
                    // Nothing to delete — acceptable.
                }
                zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                        new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            LockedApplication application = new LockedApplication(new Application(id), lock);
            store(application);
            log.info("Created " + application);
            return application;
        }
    }

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, Zone zone,
                                            ApplicationPackage applicationPackage, DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application =
                    get(applicationId).map(application1 -> new LockedApplication(application1, lock))
                                      .orElse(new LockedApplication(new Application(applicationId), lock));

            // Decide which Vespa version to deploy on.
            Version version;
            if (options.deployCurrentVersion)
                version = application.versionIn(zone, controller);
            else if (canDeployDirectlyTo(zone, options))
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed())
                return unexpectedDeployment(applicationId, zone, applicationPackage);
            else
                version = application.deployVersionIn(zone, controller);

            Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone);
            ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);

            if ( ! options.deployCurrentVersion) {
                // Add the application package's metadata to the application.
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null)
                    application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value());
                // NOTE(review): overwrites an in-progress ApplicationChange with
                // one for the new revision. Per review discussion, a deployment
                // that also changes the revision is still counted as the ongoing
                // version change, so this summary can be misleading — candidate
                // for removing ApplicationChange entirely now that job state is
                // tracked directly.
                if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange)
                    application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision)));
                if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) {
                    // Triggering may have happened externally; synthesize one if missing.
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get());
                    application = application.withJobTriggering(jobType.get(), application.deploying(),
                                                                triggering.at(), version, Optional.of(revision),
                                                                triggering.reason());
                }
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if we fail deployment below
            }

            // Validate the change being deployed before touching the config server.
            if ( ! canDeployDirectlyTo(zone, options)) {
                if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.deploying().get() + " is not tested");
                Deployment existingDeployment = application.deployments().get(zone);
                if (existingDeployment != null && existingDeployment.version().isAfter(version))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
            }

            // Assign a global rotation (if eligible) and register it in DNS.
            Optional<Rotation> rotation;
            try (RotationLock rotationLock = rotationRepository.lock()) {
                rotation = getRotation(application, zone, rotationLock);
                if (rotation.isPresent()) {
                    application = application.with(rotation.get().id());
                    store(application); // store assigned rotation even if deployment fails
                    registerRotationInDns(rotation.get(), application.rotation().get().dnsName());
                }
            }
            Set<String> cnames = application.rotation()
                    .map(ApplicationRotation::dnsName)
                    .map(Collections::singleton)
                    .orElseGet(Collections::emptySet);
            Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations = rotation
                    .map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation(
                            new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId(
                                    r.id().asString()), r.name()))
                    .map(Collections::singleton)
                    .orElseGet(Collections::emptySet);

            // Carry out the deployment: prepare, then activate, then record it.
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, revision, version, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse());
        }
    }

    /** Builds a warning-only ActivateResult for a deployment that was not expected and is therefore ignored. */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, Zone zone,
                                                ApplicationPackage applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse);
    }

    /**
     * Deactivates production deployments that are no longer listed in deployment.xml,
     * requiring an explicit validation override before any removal.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                       .map(deployment -> deployment.zone().region().value())
                                                       .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Drops status for production jobs whose zone is no longer in the deployment spec. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<Zone> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(
                    zone.get().environment(), zone.map(Zone::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version,
                                                   DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** A placeholder job run (id -1) used when no real triggering event is recorded. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant());
    }

    /** Returns a copy of the given options with the Vespa version set. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /** Derives the application revision from the package hash, plus source info when a complete git revision is given. */
    private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                             Optional<ScrewdriverBuildJob> screwDriverBuildJob) {
        if ( ! screwDriverBuildJob.isPresent())
            return ApplicationRevision.from(applicationPackage.hash());
        GitRevision gitRevision = screwDriverBuildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationRevision.from(applicationPackage.hash());
        return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(),
                                                                                      gitRevision.branch.id(),
                                                                                      gitRevision.commit.id()));
    }

    /** Get an available rotation, if deploying to a production zone and a service ID is specified */
    private Optional<Rotation> getRotation(Application application, Zone zone, RotationLock lock) {
        if (zone.environment() != Environment.prod ||
            !application.deploymentSpec().globalServiceId().isPresent()) {
            return Optional.empty();
        }
        return Optional.of(rotationRepository.getRotation(application, lock));
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " +
                                   Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the application with this id
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the application does not exist
     */
    public void deleteApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! controller.applications().get(id).isPresent())
            throw new NotExistsException("Could not delete application '" + id + "': Application not found");
        lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            if (tenant.isAthensTenant())
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                        .deleteApplication(tenant.getAthensDomain().get(),
                                           new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            db.deleteApplication(id);
            log.info("Deleted " + application);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards a completed-job report to the deployment trigger; unknown applications are logged and ignored. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, Zone zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    // Common deactivation path: optionally checks deployment expiry, then
    // removes the deployment under the application lock and stores the result.
    private void deactivate(Application application, Zone zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        if (requireThatDeploymentHasExpired && deployment.isPresent() &&
            ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;
        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, Zone zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // Already gone from the config server; still remove our record below.
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() {
        return deploymentTrigger;
    }

    // Maps dashes to underscores in the application name, for duplicate detection.
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() {
        return configserverClient;
    }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(Zone zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    public void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                .filter(zone -> zone.environment() == Environment.prod)
                .forEach(zone -> {
                    if ( ! controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent())
                        throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                });
    }

    public RotationRepository rotationRepository() {
        return rotationRepository;
    }
}
/**
 * Controller-level CRUD and deployment operations on applications.
 * All mutating operations must run under the per-application lock (see {@link #lock}).
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store every known application on startup (under lock), so stored data is
        // migrated to the current serialization format.
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return the canonical endpoints altered, if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId the deployment to retrieve global service endpoint for
     * @return empty if no global endpoint exists, otherwise the service endpoint
     *         ([clustername.]app.tenant.region.env)
     * @throws IOException if any returned endpoint lacks a host part or is syntactically invalid
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // NOTE(review): replaceAll takes a regex, so the unescaped dots match any
                // character here — presumably benign for these hostnames, but confirm.
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // Answer as soon as one host is seen both as a global and a non-global endpoint
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr")))
            throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment");
        try (Lock lock = lock(id)) {
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
            if ( ! tenant.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            if (get(dashToUnderscore(id)).isPresent()) // VESPA names only use underscores, so also reject the collision
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            if (tenant.get().isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            if (tenant.get().isAthensTenant()) {
                ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
                // Best-effort delete first so a stale registration does not block re-creation
                try {
                    zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
                                                new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
                }
                catch (ZmsException ignored) {
                }
                zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                         new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            LockedApplication application = new LockedApplication(new Application(id), lock);
            store(application);
            log.info("Created " + application);
            return application;
        }
    }

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, Zone zone,
                                            ApplicationPackage applicationPackage, DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application =
                    get(applicationId).map(application1 -> new LockedApplication(application1, lock))
                                      .orElse(new LockedApplication(new Application(applicationId), lock));

            // Determine which platform version to deploy
            Version version;
            if (options.deployCurrentVersion)
                version = application.versionIn(zone, controller);
            else if (canDeployDirectlyTo(zone, options))
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed())
                return unexpectedDeployment(applicationId, zone, applicationPackage);
            else
                version = application.deployVersionIn(zone, controller);

            Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone);
            ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);

            if ( ! options.deployCurrentVersion) {
                // Update the application based on the new package
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null)
                    application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value());
                // NOTE(review): an ongoing ApplicationChange is silently replaced with a change to
                // this revision here — confirm this is intended, as it can make a version upgrade
                // also carry a revision change without being recorded as such.
                if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange)
                    application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision)));
                if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) {
                    // Record the triggering before we deploy, so an externally triggered job is also tracked
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get());
                    application = application.withJobTriggering(jobType.get(), application.deploying(),
                                                                triggering.at(), version, Optional.of(revision),
                                                                triggering.reason());
                }
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if we fail deployment below
            }

            // Validate the deployment unless this was triggered from outside the pipeline
            if ( ! canDeployDirectlyTo(zone, options)) {
                if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.deploying().get() + " is not tested");
                Deployment existingDeployment = application.deployments().get(zone);
                if (existingDeployment != null && existingDeployment.version().isAfter(version))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
            }

            // Assign a global rotation, if eligible, and register its DNS name
            Optional<Rotation> rotation;
            try (RotationLock rotationLock = rotationRepository.lock()) {
                rotation = getRotation(application, zone, rotationLock);
                if (rotation.isPresent()) {
                    application = application.with(rotation.get().id());
                    store(application); // store assigned rotation even if deployment fails
                    registerRotationInDns(rotation.get(), application.rotation().get().dnsName());
                }
            }

            Set<String> cnames = application.rotation()
                                            .map(ApplicationRotation::dnsName)
                                            .map(Collections::singleton)
                                            .orElseGet(Collections::emptySet);

            Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations =
                    rotation.map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation(
                                    new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId(r.id().asString()),
                                    r.name()))
                            .map(Collections::singleton)
                            .orElseGet(Collections::emptySet);

            // Carry out the deployment
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, revision, version, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()),
                                      preparedApplication.prepareResponse());
        }
    }

    /** Builds a warning-only result for a deployment request we were not expecting (nothing is deployed). */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, Zone zone,
                                                ApplicationPackage applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse);
    }

    /**
     * Deactivates production deployments no longer listed in deployment.xml,
     * requiring the deployment-removal validation override to be in effect.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove =
                application.productionDeployments().values().stream()
                           .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                                        Optional.of(deployment.zone().region())))
                           .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes production job status entries for zones no longer present in the deployment spec. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<Zone> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(Zone::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** Creates a placeholder job run (id -1) for triggerings we did not initiate ourselves. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant());
    }

    /** Returns a copy of the given options with the Vespa version set to the given version. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /** Derives the application revision from the package hash, plus source info when the build job supplies it. */
    private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                             Optional<ScrewdriverBuildJob> screwDriverBuildJob) {
        if ( ! screwDriverBuildJob.isPresent()) return ApplicationRevision.from(applicationPackage.hash());
        GitRevision gitRevision = screwDriverBuildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationRevision.from(applicationPackage.hash());
        return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(),
                                                                                      gitRevision.branch.id(),
                                                                                      gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation */
    // NOTE(review): the javadoc above is dangling — it describes registerRotationInDns
    // (called from deployApplication), which is not visible at this point; confirm the
    // method was not accidentally dropped here.

    /** Get an available rotation, if deploying to a production zone and a service ID is specified */
    private Optional<Rotation> getRotation(Application application, Zone zone, RotationLock lock) {
        if (zone.environment() != Environment.prod ||
            !application.deploymentSpec().globalServiceId().isPresent()) {
            return Optional.empty();
        }
        return Optional.of(rotationRepository.getRotation(application, lock));
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            // Best-effort: log and report "unknown" rather than failing the caller
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " +
                                   Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the application with this id
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the application does not exist
     */
    public void deleteApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! controller.applications().get(id).isPresent())
            throw new NotExistsException("Could not delete application '" + id + "': Application not found");
        lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            if (tenant.isAthensTenant())
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                                .deleteApplication(tenant.getAthensDomain().get(),
                                                   new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            db.deleteApplication(id);
            log.info("Deleted " + application);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Hands a completed build-system job report to the deployment trigger; unknown applications are ignored. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, Zone zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    private void deactivate(Application application, Zone zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        // When expiry is required, keep the deployment if it has not yet expired
        if (requireThatDeploymentHasExpired && deployment.isPresent() &&
            ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;
        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, Zone zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // ok; already gone — removing our record is all that remains
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Maps dashes in the application name to underscores (tenant and instance are left untouched). */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(Zone zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    public void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                      .filter(zone -> zone.environment() == Environment.prod)
                      .forEach(zone -> {
                          if ( ! controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent())
                              throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                      });
    }

    public RotationRepository rotationRepository() { return rotationRepository; }

}
I guess you didn't mean to rename every occurrence of 'file' to 'inprogressFile' — the rename has leaked into the RPC doc strings and log messages, which should still say 'file'.
private List<Method> receiveFileMethod(Object handler) { List<Method> methods = new ArrayList<>(); methods.add(new Method(RECEIVE_META_METHOD, "sssl", "ii", handler,"receiveFileMeta") .paramDesc(0, "filereference", "inprogressFile reference to download") .paramDesc(1, "type", "'inprogressFile' or 'compressed'") .paramDesc(2, "filename", "filename") .paramDesc(3, "filelength", "length in bytes of inprogressFile") .returnDesc(0, "ret", "0 if success, 1 otherwise") .returnDesc(1, "session-id", "Session id to be used for this transfer")); methods.add(new Method(RECEIVE_PART_METHOD, "siix", "i", handler,"receiveFilePart") .paramDesc(0, "filereference", "inprogressFile reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "partid", "relative part number starting at zero") .paramDesc(3, "data", "bytes in this part") .returnDesc(0, "ret", "0 if success, 1 otherwise")); methods.add(new Method(RECEIVE_EOF_METHOD, "silis", "i", handler,"receiveFileEof") .paramDesc(0, "filereference", "inprogressFile reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "crc-code", "crc code (xxhash64)") .paramDesc(3, "error-code", "Error code. 0 if none") .paramDesc(4, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 if crc mismatch, 2 otherwise")); methods.add(new Method(RECEIVE_METHOD, "sssxlis", "i", handler, "receiveFile") .methodDesc("receive inprogressFile reference content") .paramDesc(0, "inprogressFile reference", "inprogressFile reference to download") .paramDesc(1, "filename", "filename") .paramDesc(2, "type", "'inprogressFile' or 'compressed'") .paramDesc(3, "content", "array of bytes") .paramDesc(4, "hash", "xx64hash of the inprogressFile content") .paramDesc(5, "errorcode", "Error code. 0 if none") .paramDesc(6, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 otherwise")); return methods; }
.paramDesc(0, "filereference", "inprogressFile reference to download")
private List<Method> receiveFileMethod(Object handler) { List<Method> methods = new ArrayList<>(); methods.add(new Method(RECEIVE_META_METHOD, "sssl", "ii", handler,"receiveFileMeta") .paramDesc(0, "filereference", "file reference to download") .paramDesc(1, "type", "'file' or 'compressed'") .paramDesc(2, "filename", "filename") .paramDesc(3, "filelength", "length in bytes of file") .returnDesc(0, "ret", "0 if success, 1 otherwise") .returnDesc(1, "session-id", "Session id to be used for this transfer")); methods.add(new Method(RECEIVE_PART_METHOD, "siix", "i", handler,"receiveFilePart") .paramDesc(0, "filereference", "file reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "partid", "relative part number starting at zero") .paramDesc(3, "data", "bytes in this part") .returnDesc(0, "ret", "0 if success, 1 otherwise")); methods.add(new Method(RECEIVE_EOF_METHOD, "silis", "i", handler,"receiveFileEof") .paramDesc(0, "filereference", "file reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "crc-code", "crc code (xxhash64)") .paramDesc(3, "error-code", "Error code. 0 if none") .paramDesc(4, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 if crc mismatch, 2 otherwise")); methods.add(new Method(RECEIVE_METHOD, "sssxlis", "i", handler, "receiveFile") .methodDesc("receive file reference content") .paramDesc(0, "file reference", "file reference to download") .paramDesc(1, "filename", "filename") .paramDesc(2, "type", "'file' or 'compressed'") .paramDesc(3, "content", "array of bytes") .paramDesc(4, "hash", "xx64hash of the file content") .paramDesc(5, "errorcode", "Error code. 0 if none") .paramDesc(6, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 otherwise")); return methods; }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File inprogressFile; Session(File downloadDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory(" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory(" + fileReferenceDir.toPath() + "): ", e); } try { inprogressFile = Files.createTempFile(fileReferenceDir.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating tempfile for inprogress file for(" + fileName + ") in '" + fileReferenceDir.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the inprogressFile from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, 
StandardOpenOption.APPEND); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed writing to file(" + inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory("archive").toFile(); log.log(LogLevel.DEBUG, "Archived inprogressFile, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { log.log(LogLevel.DEBUG, "Uncompressed inprogressFile, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing inprogressFile: " + e.getMessage(), e); throw new RuntimeException("Failed writing inprogressFile: ", e); } return file; } }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File inprogressFile; Session(File downloadDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory(" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory(" + fileReferenceDir.toPath() + "): ", e); } try { inprogressFile = Files.createTempFile(fileReferenceDir.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating tempfile for inprogress file for(" + fileName + ") in '" + fileReferenceDir.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } 
catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed writing to file(" + inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory("archive").toFile(); log.log(LogLevel.DEBUG, "Archived file, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { log.log(LogLevel.DEBUG, "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } return file; } }
Fixed
private List<Method> receiveFileMethod(Object handler) { List<Method> methods = new ArrayList<>(); methods.add(new Method(RECEIVE_META_METHOD, "sssl", "ii", handler,"receiveFileMeta") .paramDesc(0, "filereference", "inprogressFile reference to download") .paramDesc(1, "type", "'inprogressFile' or 'compressed'") .paramDesc(2, "filename", "filename") .paramDesc(3, "filelength", "length in bytes of inprogressFile") .returnDesc(0, "ret", "0 if success, 1 otherwise") .returnDesc(1, "session-id", "Session id to be used for this transfer")); methods.add(new Method(RECEIVE_PART_METHOD, "siix", "i", handler,"receiveFilePart") .paramDesc(0, "filereference", "inprogressFile reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "partid", "relative part number starting at zero") .paramDesc(3, "data", "bytes in this part") .returnDesc(0, "ret", "0 if success, 1 otherwise")); methods.add(new Method(RECEIVE_EOF_METHOD, "silis", "i", handler,"receiveFileEof") .paramDesc(0, "filereference", "inprogressFile reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "crc-code", "crc code (xxhash64)") .paramDesc(3, "error-code", "Error code. 0 if none") .paramDesc(4, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 if crc mismatch, 2 otherwise")); methods.add(new Method(RECEIVE_METHOD, "sssxlis", "i", handler, "receiveFile") .methodDesc("receive inprogressFile reference content") .paramDesc(0, "inprogressFile reference", "inprogressFile reference to download") .paramDesc(1, "filename", "filename") .paramDesc(2, "type", "'inprogressFile' or 'compressed'") .paramDesc(3, "content", "array of bytes") .paramDesc(4, "hash", "xx64hash of the inprogressFile content") .paramDesc(5, "errorcode", "Error code. 0 if none") .paramDesc(6, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 otherwise")); return methods; }
.paramDesc(0, "filereference", "inprogressFile reference to download")
private List<Method> receiveFileMethod(Object handler) { List<Method> methods = new ArrayList<>(); methods.add(new Method(RECEIVE_META_METHOD, "sssl", "ii", handler,"receiveFileMeta") .paramDesc(0, "filereference", "file reference to download") .paramDesc(1, "type", "'file' or 'compressed'") .paramDesc(2, "filename", "filename") .paramDesc(3, "filelength", "length in bytes of file") .returnDesc(0, "ret", "0 if success, 1 otherwise") .returnDesc(1, "session-id", "Session id to be used for this transfer")); methods.add(new Method(RECEIVE_PART_METHOD, "siix", "i", handler,"receiveFilePart") .paramDesc(0, "filereference", "file reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "partid", "relative part number starting at zero") .paramDesc(3, "data", "bytes in this part") .returnDesc(0, "ret", "0 if success, 1 otherwise")); methods.add(new Method(RECEIVE_EOF_METHOD, "silis", "i", handler,"receiveFileEof") .paramDesc(0, "filereference", "file reference to download") .paramDesc(1, "session-id", "Session id to be used for this transfer") .paramDesc(2, "crc-code", "crc code (xxhash64)") .paramDesc(3, "error-code", "Error code. 0 if none") .paramDesc(4, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 if crc mismatch, 2 otherwise")); methods.add(new Method(RECEIVE_METHOD, "sssxlis", "i", handler, "receiveFile") .methodDesc("receive file reference content") .paramDesc(0, "file reference", "file reference to download") .paramDesc(1, "filename", "filename") .paramDesc(2, "type", "'file' or 'compressed'") .paramDesc(3, "content", "array of bytes") .paramDesc(4, "hash", "xx64hash of the file content") .paramDesc(5, "errorcode", "Error code. 0 if none") .paramDesc(6, "error-description", "Error description.") .returnDesc(0, "ret", "0 if success, 1 otherwise")); return methods; }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File inprogressFile; Session(File downloadDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory(" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory(" + fileReferenceDir.toPath() + "): ", e); } try { inprogressFile = Files.createTempFile(fileReferenceDir.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating tempfile for inprogress file for(" + fileName + ") in '" + fileReferenceDir.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the inprogressFile from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, 
StandardOpenOption.APPEND); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed writing to file(" + inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory("archive").toFile(); log.log(LogLevel.DEBUG, "Archived inprogressFile, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { log.log(LogLevel.DEBUG, "Uncompressed inprogressFile, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing inprogressFile: " + e.getMessage(), e); throw new RuntimeException("Failed writing inprogressFile: ", e); } return file; } }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File inprogressFile; Session(File downloadDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory(" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory(" + fileReferenceDir.toPath() + "): ", e); } try { inprogressFile = Files.createTempFile(fileReferenceDir.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating tempfile for inprogress file for(" + fileName + ") in '" + fileReferenceDir.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } 
catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed writing to file(" + inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory("archive").toFile(); log.log(LogLevel.DEBUG, "Archived file, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { log.log(LogLevel.DEBUG, "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } return file; } }
The indentation is off here — please align it with the surrounding code.
public void status(Status status) { this.status = status; }
}
public void status(Status status) { log.log(LogLevel.INFO, "Changing health status code from '" + this.status + "' to '" + status.name() + "'"); this.status = status; }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
set status to down after server.run()? Is this ever reached? Or, slightly more generally, is there a good place to set the status to down?
public void run() { if (versionState.isUpgraded()) { log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { applicationRepository.redeployAllApplications(deployer); } catch (InterruptedException e) { throw new RuntimeException("Redeploying applications failed", e); } log.log(LogLevel.INFO, "All applications redeployed"); } versionState.saveNewVersion(); stateMonitor.status(StateMonitor.Status.up); log.log(LogLevel.DEBUG, "Starting RPC server"); server.run(); log.log(LogLevel.DEBUG, "RPC server stopped"); }
log.log(LogLevel.DEBUG, "RPC server stopped");
public void run() { if (versionState.isUpgraded()) { log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { applicationRepository.redeployAllApplications(deployer); } catch (InterruptedException e) { throw new RuntimeException("Redeploying applications failed", e); } log.log(LogLevel.INFO, "All applications redeployed"); } versionState.saveNewVersion(); stateMonitor.status(StateMonitor.Status.up); log.log(LogLevel.DEBUG, "Starting RPC server"); server.run(); log.log(LogLevel.DEBUG, "RPC server stopped"); stateMonitor.status(StateMonitor.Status.down); }
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName()); private final ApplicationRepository applicationRepository; private final RpcServer server; private final Thread serverThread; private final Deployer deployer; private final VersionState versionState; private final StateMonitor stateMonitor; @SuppressWarnings("WeakerAccess") @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, Deployer deployer, VersionState versionState, StateMonitor stateMonitor) { this.applicationRepository = applicationRepository; this.server = server; this.deployer = deployer; this.versionState = versionState; this.stateMonitor = stateMonitor; this.serverThread = new Thread(this, "configserver main"); serverThread.start(); } @Override public void deconstruct() { log.log(LogLevel.INFO, "Stopping config server"); server.stop(); try { serverThread.join(); } catch (InterruptedException e) { log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage()); } } @Override }
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName()); private final ApplicationRepository applicationRepository; private final RpcServer server; private final Thread serverThread; private final Deployer deployer; private final VersionState versionState; private final StateMonitor stateMonitor; @SuppressWarnings("WeakerAccess") @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, Deployer deployer, VersionState versionState, StateMonitor stateMonitor) { this.applicationRepository = applicationRepository; this.server = server; this.deployer = deployer; this.versionState = versionState; this.stateMonitor = stateMonitor; this.serverThread = new Thread(this, "configserver main"); serverThread.start(); } @Override public void deconstruct() { log.log(LogLevel.INFO, "Stopping config server"); server.stop(); try { serverThread.join(); } catch (InterruptedException e) { log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage()); } } @Override }
This seems worthy of an INFO log message (if status actually changes)
public void status(Status status) { this.status = status; }
this.status = status;
public void status(Status status) { log.log(LogLevel.INFO, "Changing health status code from '" + this.status + "' to '" + status.name() + "'"); this.status = status; }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
It will only be reached for a short time when the server goes down (that's why I didn't set it), but thinking about it I think it's correct to set it to down anyway.
public void run() { if (versionState.isUpgraded()) { log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { applicationRepository.redeployAllApplications(deployer); } catch (InterruptedException e) { throw new RuntimeException("Redeploying applications failed", e); } log.log(LogLevel.INFO, "All applications redeployed"); } versionState.saveNewVersion(); stateMonitor.status(StateMonitor.Status.up); log.log(LogLevel.DEBUG, "Starting RPC server"); server.run(); log.log(LogLevel.DEBUG, "RPC server stopped"); }
log.log(LogLevel.DEBUG, "RPC server stopped");
public void run() { if (versionState.isUpgraded()) { log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications"); try { applicationRepository.redeployAllApplications(deployer); } catch (InterruptedException e) { throw new RuntimeException("Redeploying applications failed", e); } log.log(LogLevel.INFO, "All applications redeployed"); } versionState.saveNewVersion(); stateMonitor.status(StateMonitor.Status.up); log.log(LogLevel.DEBUG, "Starting RPC server"); server.run(); log.log(LogLevel.DEBUG, "RPC server stopped"); stateMonitor.status(StateMonitor.Status.down); }
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName()); private final ApplicationRepository applicationRepository; private final RpcServer server; private final Thread serverThread; private final Deployer deployer; private final VersionState versionState; private final StateMonitor stateMonitor; @SuppressWarnings("WeakerAccess") @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, Deployer deployer, VersionState versionState, StateMonitor stateMonitor) { this.applicationRepository = applicationRepository; this.server = server; this.deployer = deployer; this.versionState = versionState; this.stateMonitor = stateMonitor; this.serverThread = new Thread(this, "configserver main"); serverThread.start(); } @Override public void deconstruct() { log.log(LogLevel.INFO, "Stopping config server"); server.stop(); try { serverThread.join(); } catch (InterruptedException e) { log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage()); } } @Override }
class ConfigServerBootstrap extends AbstractComponent implements Runnable { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName()); private final ApplicationRepository applicationRepository; private final RpcServer server; private final Thread serverThread; private final Deployer deployer; private final VersionState versionState; private final StateMonitor stateMonitor; @SuppressWarnings("WeakerAccess") @Inject public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, Deployer deployer, VersionState versionState, StateMonitor stateMonitor) { this.applicationRepository = applicationRepository; this.server = server; this.deployer = deployer; this.versionState = versionState; this.stateMonitor = stateMonitor; this.serverThread = new Thread(this, "configserver main"); serverThread.start(); } @Override public void deconstruct() { log.log(LogLevel.INFO, "Stopping config server"); server.stop(); try { serverThread.join(); } catch (InterruptedException e) { log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage()); } } @Override }
Good point, done
public void status(Status status) { this.status = status; }
this.status = status;
public void status(Status status) { log.log(LogLevel.INFO, "Changing health status code from '" + this.status + "' to '" + status.name() + "'"); this.status = status; }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
Wrong indentation
private KeyManager[] createKeyManagersWithServiceCertificate() { try { credentialsRetrievedSignal.await(); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry("instance-key", credentials.get().getKeyPair().getPrivate(), new char[0], new Certificate[]{credentials.get().getCertificate()}); KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); keyManagerFactory.init(keyStore, new char[0]); return keyManagerFactory.getKeyManagers(); } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException | CertificateException | IOException e) { throw new RuntimeException(e); } catch (InterruptedException e) { throw new AthenzIdentityProviderException("Failed to register instance credentials", lastThrowable.get()); } }
credentialsRetrievedSignal.await();
private KeyManager[] createKeyManagersWithServiceCertificate() { try { credentialsRetrievedSignal.await(); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry("instance-key", credentials.get().getKeyPair().getPrivate(), new char[0], new Certificate[]{credentials.get().getCertificate()}); KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); keyManagerFactory.init(keyStore, new char[0]); return keyManagerFactory.getKeyManagers(); } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException | CertificateException | IOException e) { throw new RuntimeException(e); } catch (InterruptedException e) { throw new AthenzIdentityProviderException("Failed to register instance credentials", lastThrowable.get()); } }
class AthenzIdentityProviderImpl extends AbstractComponent implements AthenzIdentityProvider { private static final Logger log = Logger.getLogger(AthenzIdentityProviderImpl.class.getName()); static final Duration EXPIRES_AFTER = Duration.ofDays(1); static final Duration EXPIRATION_MARGIN = Duration.ofMinutes(30); static final Duration INITIAL_WAIT_NTOKEN = Duration.ofMinutes(5); static final Duration UPDATE_PERIOD = EXPIRES_AFTER.dividedBy(3); static final Duration REDUCED_UPDATE_PERIOD = Duration.ofMinutes(30); static final Duration INITIAL_BACKOFF_DELAY = Duration.ofMinutes(4); static final Duration MAX_REGISTER_BACKOFF_DELAY = Duration.ofHours(1); static final int BACKOFF_DELAY_MULTIPLIER = 2; static final Duration AWAIT_TERMINTATION_TIMEOUT = Duration.ofSeconds(90); static final String REGISTER_INSTANCE_TAG = "register-instance"; static final String UPDATE_CREDENTIALS_TAG = "update-credentials"; static final String TIMEOUT_INITIAL_WAIT_TAG = "timeout-initial-wait"; private final AtomicReference<AthenzCredentials> credentials = new AtomicReference<>(); private final AtomicReference<Throwable> lastThrowable = new AtomicReference<>(); private final CountDownLatch credentialsRetrievedSignal = new CountDownLatch(1); private final AthenzCredentialsService athenzCredentialsService; private final Scheduler scheduler; private final Clock clock; private final String domain; private final String service; @Inject public AthenzIdentityProviderImpl(IdentityConfig config) { this(config, new AthenzCredentialsService(config, new IdentityDocumentService(config.loadBalancerAddress()), new AthenzService(), Clock.systemUTC()), new ThreadPoolScheduler(), Clock.systemUTC()); } AthenzIdentityProviderImpl(IdentityConfig config, AthenzCredentialsService athenzCredentialsService, Scheduler scheduler, Clock clock) { this.athenzCredentialsService = athenzCredentialsService; this.scheduler = scheduler; this.clock = clock; this.domain = config.domain(); this.service = config.service(); 
scheduler.submit(new RegisterInstanceTask()); scheduler.schedule(new TimeoutInitialWaitTask(), INITIAL_WAIT_NTOKEN); } @Override public String getNToken() { try { credentialsRetrievedSignal.await(); AthenzCredentials credentialsSnapshot = credentials.get(); if (credentialsSnapshot == null) { throw new AthenzIdentityProviderException("Could not retrieve Athenz credentials", lastThrowable.get()); } if (isExpired(credentialsSnapshot)) { throw new AthenzIdentityProviderException("Athenz credentials are expired", lastThrowable.get()); } return credentialsSnapshot.getNToken(); } catch (InterruptedException e) { throw new AthenzIdentityProviderException("Failed to register instance credentials", lastThrowable.get()); } } @Override public String getDomain() { return domain; } @Override public String getService() { return service; } @Override public SSLContext getSslContext() { try { SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); sslContext.init(createKeyManagersWithServiceCertificate(), createTrustManagersWithAthenzCa(), null); return sslContext; } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new RuntimeException(e); } } private static TrustManager[] createTrustManagersWithAthenzCa() { try { KeyStore trustStore = KeyStore.getInstance("JKS"); try (FileInputStream in = new FileInputStream("/home/y/share/ssl/certs/yahoo_certificate_bundle.jks")) { trustStore.load(in, null); } TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); trustManagerFactory.init(trustStore); return trustManagerFactory.getTrustManagers(); } catch (CertificateException | IOException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); } } @Override public void deconstruct() { scheduler.shutdown(AWAIT_TERMINTATION_TIMEOUT); } private boolean isExpired(AthenzCredentials credentials) { return clock.instant().isAfter(getExpirationTime(credentials)); } private static Instant 
getExpirationTime(AthenzCredentials credentials) { return credentials.getCreatedAt().plus(EXPIRES_AFTER).minus(EXPIRATION_MARGIN); } private class RegisterInstanceTask implements RunnableWithTag { private final Duration backoffDelay; RegisterInstanceTask() { this(INITIAL_BACKOFF_DELAY); } RegisterInstanceTask(Duration backoffDelay) { this.backoffDelay = backoffDelay; } @Override public void run() { try { credentials.set(athenzCredentialsService.registerInstance()); credentialsRetrievedSignal.countDown(); scheduler.schedule(new UpdateCredentialsTask(), UPDATE_PERIOD); } catch (Throwable t) { log.log(LogLevel.ERROR, "Failed to register instance: " + t.getMessage(), t); lastThrowable.set(t); Duration nextBackoffDelay = backoffDelay.multipliedBy(BACKOFF_DELAY_MULTIPLIER); if (nextBackoffDelay.compareTo(MAX_REGISTER_BACKOFF_DELAY) > 0) { nextBackoffDelay = MAX_REGISTER_BACKOFF_DELAY; } scheduler.schedule(new RegisterInstanceTask(nextBackoffDelay), backoffDelay); } } @Override public String tag() { return REGISTER_INSTANCE_TAG; } } private class UpdateCredentialsTask implements RunnableWithTag { @Override public void run() { AthenzCredentials currentCredentials = credentials.get(); try { AthenzCredentials newCredentials = isExpired(currentCredentials) ? athenzCredentialsService.registerInstance() : athenzCredentialsService.updateCredentials(currentCredentials); credentials.set(newCredentials); scheduler.schedule(new UpdateCredentialsTask(), UPDATE_PERIOD); } catch (Throwable t) { log.log(LogLevel.WARNING, "Failed to update credentials: " + t.getMessage(), t); lastThrowable.set(t); Duration timeToExpiration = Duration.between(clock.instant(), getExpirationTime(currentCredentials)); Duration updatePeriod = timeToExpiration.compareTo(UPDATE_PERIOD) > 0 ? 
UPDATE_PERIOD : REDUCED_UPDATE_PERIOD; scheduler.schedule(new UpdateCredentialsTask(), updatePeriod); } } @Override public String tag() { return UPDATE_CREDENTIALS_TAG; } } private class TimeoutInitialWaitTask implements RunnableWithTag { @Override public void run() { credentialsRetrievedSignal.countDown(); } @Override public String tag() { return TIMEOUT_INITIAL_WAIT_TAG; } } private static class ThreadPoolScheduler implements Scheduler { private static final Logger log = Logger.getLogger(ThreadPoolScheduler.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(0); @Override public void schedule(RunnableWithTag runnable, Duration delay) { log.log(LogLevel.FINE, String.format("Scheduling task '%s' in '%s'", runnable.tag(), delay)); executor.schedule(runnable, delay.getSeconds(), TimeUnit.SECONDS); } @Override public void submit(RunnableWithTag runnable) { log.log(LogLevel.FINE, String.format("Scheduling task '%s' now", runnable.tag())); executor.submit(runnable); } @Override public void shutdown(Duration timeout) { try { executor.shutdownNow(); executor.awaitTermination(AWAIT_TERMINTATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS); } catch (InterruptedException e) { throw new RuntimeException(e); } } } public interface Scheduler { void schedule(RunnableWithTag runnable, Duration delay); default void submit(RunnableWithTag runnable) { schedule(runnable, Duration.ZERO); } default void shutdown(Duration timeout) {} } public interface RunnableWithTag extends Runnable { String tag(); } }
class AthenzIdentityProviderImpl extends AbstractComponent implements AthenzIdentityProvider { private static final Logger log = Logger.getLogger(AthenzIdentityProviderImpl.class.getName()); static final Duration EXPIRES_AFTER = Duration.ofDays(1); static final Duration EXPIRATION_MARGIN = Duration.ofMinutes(30); static final Duration INITIAL_WAIT_NTOKEN = Duration.ofMinutes(5); static final Duration UPDATE_PERIOD = EXPIRES_AFTER.dividedBy(3); static final Duration REDUCED_UPDATE_PERIOD = Duration.ofMinutes(30); static final Duration INITIAL_BACKOFF_DELAY = Duration.ofMinutes(4); static final Duration MAX_REGISTER_BACKOFF_DELAY = Duration.ofHours(1); static final int BACKOFF_DELAY_MULTIPLIER = 2; static final Duration AWAIT_TERMINTATION_TIMEOUT = Duration.ofSeconds(90); static final String REGISTER_INSTANCE_TAG = "register-instance"; static final String UPDATE_CREDENTIALS_TAG = "update-credentials"; static final String TIMEOUT_INITIAL_WAIT_TAG = "timeout-initial-wait"; private final AtomicReference<AthenzCredentials> credentials = new AtomicReference<>(); private final AtomicReference<Throwable> lastThrowable = new AtomicReference<>(); private final CountDownLatch credentialsRetrievedSignal = new CountDownLatch(1); private final AthenzCredentialsService athenzCredentialsService; private final Scheduler scheduler; private final Clock clock; private final String domain; private final String service; @Inject public AthenzIdentityProviderImpl(IdentityConfig config) { this(config, new AthenzCredentialsService(config, new IdentityDocumentService(config.loadBalancerAddress()), new AthenzService(), Clock.systemUTC()), new ThreadPoolScheduler(), Clock.systemUTC()); } AthenzIdentityProviderImpl(IdentityConfig config, AthenzCredentialsService athenzCredentialsService, Scheduler scheduler, Clock clock) { this.athenzCredentialsService = athenzCredentialsService; this.scheduler = scheduler; this.clock = clock; this.domain = config.domain(); this.service = config.service(); 
scheduler.submit(new RegisterInstanceTask()); scheduler.schedule(new TimeoutInitialWaitTask(), INITIAL_WAIT_NTOKEN); } @Override public String getNToken() { try { credentialsRetrievedSignal.await(); AthenzCredentials credentialsSnapshot = credentials.get(); if (credentialsSnapshot == null) { throw new AthenzIdentityProviderException("Could not retrieve Athenz credentials", lastThrowable.get()); } if (isExpired(credentialsSnapshot)) { throw new AthenzIdentityProviderException("Athenz credentials are expired", lastThrowable.get()); } return credentialsSnapshot.getNToken(); } catch (InterruptedException e) { throw new AthenzIdentityProviderException("Failed to register instance credentials", lastThrowable.get()); } } @Override public String getDomain() { return domain; } @Override public String getService() { return service; } @Override public SSLContext getSslContext() { try { SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); sslContext.init(createKeyManagersWithServiceCertificate(), createTrustManagersWithAthenzCa(), null); return sslContext; } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new RuntimeException(e); } } private static TrustManager[] createTrustManagersWithAthenzCa() { try { KeyStore trustStore = KeyStore.getInstance("JKS"); try (FileInputStream in = new FileInputStream("/home/y/share/ssl/certs/yahoo_certificate_bundle.jks")) { trustStore.load(in, null); } TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); trustManagerFactory.init(trustStore); return trustManagerFactory.getTrustManagers(); } catch (CertificateException | IOException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); } } @Override public void deconstruct() { scheduler.shutdown(AWAIT_TERMINTATION_TIMEOUT); } private boolean isExpired(AthenzCredentials credentials) { return clock.instant().isAfter(getExpirationTime(credentials)); } private static Instant 
getExpirationTime(AthenzCredentials credentials) { return credentials.getCreatedAt().plus(EXPIRES_AFTER).minus(EXPIRATION_MARGIN); } private class RegisterInstanceTask implements RunnableWithTag { private final Duration backoffDelay; RegisterInstanceTask() { this(INITIAL_BACKOFF_DELAY); } RegisterInstanceTask(Duration backoffDelay) { this.backoffDelay = backoffDelay; } @Override public void run() { try { credentials.set(athenzCredentialsService.registerInstance()); credentialsRetrievedSignal.countDown(); scheduler.schedule(new UpdateCredentialsTask(), UPDATE_PERIOD); } catch (Throwable t) { log.log(LogLevel.ERROR, "Failed to register instance: " + t.getMessage(), t); lastThrowable.set(t); Duration nextBackoffDelay = backoffDelay.multipliedBy(BACKOFF_DELAY_MULTIPLIER); if (nextBackoffDelay.compareTo(MAX_REGISTER_BACKOFF_DELAY) > 0) { nextBackoffDelay = MAX_REGISTER_BACKOFF_DELAY; } scheduler.schedule(new RegisterInstanceTask(nextBackoffDelay), backoffDelay); } } @Override public String tag() { return REGISTER_INSTANCE_TAG; } } private class UpdateCredentialsTask implements RunnableWithTag { @Override public void run() { AthenzCredentials currentCredentials = credentials.get(); try { AthenzCredentials newCredentials = isExpired(currentCredentials) ? athenzCredentialsService.registerInstance() : athenzCredentialsService.updateCredentials(currentCredentials); credentials.set(newCredentials); scheduler.schedule(new UpdateCredentialsTask(), UPDATE_PERIOD); } catch (Throwable t) { log.log(LogLevel.WARNING, "Failed to update credentials: " + t.getMessage(), t); lastThrowable.set(t); Duration timeToExpiration = Duration.between(clock.instant(), getExpirationTime(currentCredentials)); Duration updatePeriod = timeToExpiration.compareTo(UPDATE_PERIOD) > 0 ? 
UPDATE_PERIOD : REDUCED_UPDATE_PERIOD; scheduler.schedule(new UpdateCredentialsTask(), updatePeriod); } } @Override public String tag() { return UPDATE_CREDENTIALS_TAG; } } private class TimeoutInitialWaitTask implements RunnableWithTag { @Override public void run() { credentialsRetrievedSignal.countDown(); } @Override public String tag() { return TIMEOUT_INITIAL_WAIT_TAG; } } private static class ThreadPoolScheduler implements Scheduler { private static final Logger log = Logger.getLogger(ThreadPoolScheduler.class.getName()); private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(0); @Override public void schedule(RunnableWithTag runnable, Duration delay) { log.log(LogLevel.FINE, String.format("Scheduling task '%s' in '%s'", runnable.tag(), delay)); executor.schedule(runnable, delay.getSeconds(), TimeUnit.SECONDS); } @Override public void submit(RunnableWithTag runnable) { log.log(LogLevel.FINE, String.format("Scheduling task '%s' now", runnable.tag())); executor.submit(runnable); } @Override public void shutdown(Duration timeout) { try { executor.shutdownNow(); executor.awaitTermination(AWAIT_TERMINTATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS); } catch (InterruptedException e) { throw new RuntimeException(e); } } } public interface Scheduler { void schedule(RunnableWithTag runnable, Duration delay); default void submit(RunnableWithTag runnable) { schedule(runnable, Duration.ZERO); } default void shutdown(Duration timeout) {} } public interface RunnableWithTag extends Runnable { String tag(); } }
fixed
public void status(Status status) { this.status = status; }
}
public void status(Status status) { log.log(LogLevel.INFO, "Changing health status code from '" + this.status + "' to '" + status.name() + "'"); this.status = status; }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
class StateMonitor extends AbstractComponent { private final static Logger log = Logger.getLogger(StateMonitor.class.getName()); public enum Status {up, down, initializing}; private final CopyOnWriteArrayList<StateMetricConsumer> consumers = new CopyOnWriteArrayList<>(); private final Thread thread; private final Timer timer; private final long snapshotIntervalMs; private long lastSnapshotTimeMs; private volatile MetricSnapshot snapshot; private volatile Status status; private final TreeSet<String> valueNames = new TreeSet<>(); @Inject public StateMonitor(HealthMonitorConfig config, Timer timer) { this.timer = timer; this.snapshotIntervalMs = (long)(config.snapshot_interval() * TimeUnit.SECONDS.toMillis(1)); this.lastSnapshotTimeMs = timer.currentTimeMillis(); this.status = Status.valueOf(config.initialStatus()); thread = new Thread(StateMonitor.this::run, "StateMonitor"); thread.setDaemon(true); thread.start(); } /** Returns a metric consumer for jDisc which will write metrics back to this */ public MetricConsumer newMetricConsumer() { StateMetricConsumer consumer = new StateMetricConsumer(); consumers.add(consumer); return consumer; } public Status status() { return status; } /** Returns the last snapshot taken of the metrics in this system */ public MetricSnapshot snapshot() { return snapshot; } /** Returns the interval between each metrics snapshot used by this */ public long getSnapshotIntervalMillis() { return snapshotIntervalMs; } boolean checkTime() { long now = timer.currentTimeMillis(); if (now < lastSnapshotTimeMs + snapshotIntervalMs) { return false; } snapshot = createSnapshot(lastSnapshotTimeMs, now); lastSnapshotTimeMs = now; return true; } private void run() { log.finest("StateMonitor started."); try { while (!Thread.interrupted()) { checkTime(); Thread.sleep((lastSnapshotTimeMs + snapshotIntervalMs) - timer.currentTimeMillis()); } } catch (InterruptedException e) { } log.finest("StateMonitor stopped."); } private MetricSnapshot createSnapshot(long 
fromMillis, long toMillis) { MetricSnapshot snapshot = new MetricSnapshot(fromMillis, toMillis, TimeUnit.MILLISECONDS); for (StateMetricConsumer consumer : consumers) { snapshot.add(consumer.createSnapshot()); } updateNames(snapshot); return snapshot; } private void updateNames(MetricSnapshot current) { TreeSet<String> seen = new TreeSet<>(); for (Map.Entry<MetricDimensions, MetricSet> dimensionAndMetric : current) { for (Map.Entry<String, MetricValue> nameAndMetric : dimensionAndMetric.getValue()) { seen.add(nameAndMetric.getKey()); } } synchronized (valueNames) { for (String name : valueNames) { if (!seen.contains(name)) { current.add((MetricDimensions) StateMetricConsumer.NULL_CONTEXT, name, 0); } } valueNames.addAll(seen); } } @Override public void deconstruct() { thread.interrupt(); try { thread.join(5000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } if (thread.isAlive()) { log.warning("StateMonitor failed to terminate within 5 seconds of interrupt signal. Ignoring."); } } }
You shouldn't remove @Override
public static Zone defaultZone() { return new Zone(SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); }
return new Zone(SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName());
public static Zone defaultZone() { return new Zone(SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); }
class Zone { private final SystemName systemName; private final FlavorDefaults flavorDefaults; private final Optional<NodeFlavors> nodeFlavors; private final ZoneId id; @Inject public Zone(ConfigserverConfig configserverConfig, NodeFlavors nodeFlavors) { this(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region()), new FlavorDefaults(configserverConfig), nodeFlavors); } /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(systemName, environment, region, new FlavorDefaults("default"), null); } private Zone(SystemName systemName, Environment environment, RegionName region, FlavorDefaults flavorDefaults, NodeFlavors nodeFlavors) { this.id = new ZoneId(environment, region); this.flavorDefaults = flavorDefaults; this.systemName = systemName; this.nodeFlavors = Optional.ofNullable(nodeFlavors); } /** Returns the id of this */ public ZoneId id() { return id; } /** Returns the current environment */ public Environment environment() { return id.environment(); } /** Returns the current region */ public RegionName region() { return id.region(); } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the default hardware flavor to assign in this zone */ public String defaultFlavor(ClusterSpec.Type clusterType) { return flavorDefaults.flavor(clusterType); } /** Returns all available node flavors for the zone, or empty if not set for this Zone. */ public Optional<NodeFlavors> nodeFlavors() { return nodeFlavors; } /** Do not use */ public String toString() { return id.toString(); } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! 
(o instanceof Zone)) return false; return Objects.equals(id, ((Zone) o).id); } public int hashCode() { return id.hashCode(); } private static class FlavorDefaults { /** The default default flavor */ private final String defaultFlavor; /** The default flavor for each cluster type, or empty to use defaultFlavor */ private final Optional<String> adminFlavor; private final Optional<String> containerFlavor; private final Optional<String> contentFlavor; /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor) { this(defaultFlavor, Optional.empty(), Optional.empty(), Optional.empty()); } /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor, Optional<String> adminFlavor, Optional<String> containerFlavor, Optional<String> contentFlavor) { this.defaultFlavor = defaultFlavor; this.adminFlavor = adminFlavor; this.containerFlavor = containerFlavor; this.contentFlavor = contentFlavor; } public FlavorDefaults(ConfigserverConfig config) { this(config.defaultFlavor(), emptyIfDefault(config.defaultAdminFlavor()), emptyIfDefault(config.defaultContainerFlavor()), emptyIfDefault(config.defaultContentFlavor())); } /** Map "default" to empty - this config cannot have missing values due to the need for supporting non-hosted */ private static Optional<String> emptyIfDefault(String value) { if (Strings.isNullOrEmpty(value)) return Optional.empty(); if (value.equals("default")) return Optional.empty(); return Optional.of(value); } /** * Returns the flavor default for a given cluster type. * This may be "default" - which is an invalid value - but never null. */ public String flavor(ClusterSpec.Type clusterType) { switch (clusterType) { case admin: return adminFlavor.orElse(defaultFlavor); case container: return containerFlavor.orElse(defaultFlavor); case content: return contentFlavor.orElse(defaultFlavor); default: return defaultFlavor; } } } }
class Zone { private final SystemName systemName; private final FlavorDefaults flavorDefaults; private final Optional<NodeFlavors> nodeFlavors; private final ZoneId id; @Inject public Zone(ConfigserverConfig configserverConfig, NodeFlavors nodeFlavors) { this(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region()), new FlavorDefaults(configserverConfig), nodeFlavors); } /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(systemName, environment, region, new FlavorDefaults("default"), null); } private Zone(SystemName systemName, Environment environment, RegionName region, FlavorDefaults flavorDefaults, NodeFlavors nodeFlavors) { this.id = ZoneId.from(environment, region); this.flavorDefaults = flavorDefaults; this.systemName = systemName; this.nodeFlavors = Optional.ofNullable(nodeFlavors); } /** Returns the id of this */ public ZoneId id() { return id; } /** Returns the current environment */ public Environment environment() { return id.environment(); } /** Returns the current region */ public RegionName region() { return id.region(); } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the default hardware flavor to assign in this zone */ public String defaultFlavor(ClusterSpec.Type clusterType) { return flavorDefaults.flavor(clusterType); } /** Returns all available node flavors for the zone, or empty if not set for this Zone. */ public Optional<NodeFlavors> nodeFlavors() { return nodeFlavors; } /** Do not use */ @Override public String toString() { return id.toString(); } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! 
(o instanceof Zone)) return false; return Objects.equals(id, ((Zone) o).id); } @Override public int hashCode() { return id.hashCode(); } private static class FlavorDefaults { /** The default default flavor */ private final String defaultFlavor; /** The default flavor for each cluster type, or empty to use defaultFlavor */ private final Optional<String> adminFlavor; private final Optional<String> containerFlavor; private final Optional<String> contentFlavor; /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor) { this(defaultFlavor, Optional.empty(), Optional.empty(), Optional.empty()); } /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor, Optional<String> adminFlavor, Optional<String> containerFlavor, Optional<String> contentFlavor) { this.defaultFlavor = defaultFlavor; this.adminFlavor = adminFlavor; this.containerFlavor = containerFlavor; this.contentFlavor = contentFlavor; } public FlavorDefaults(ConfigserverConfig config) { this(config.defaultFlavor(), emptyIfDefault(config.defaultAdminFlavor()), emptyIfDefault(config.defaultContainerFlavor()), emptyIfDefault(config.defaultContentFlavor())); } /** Map "default" to empty - this config cannot have missing values due to the need for supporting non-hosted */ private static Optional<String> emptyIfDefault(String value) { if (Strings.isNullOrEmpty(value)) return Optional.empty(); if (value.equals("default")) return Optional.empty(); return Optional.of(value); } /** * Returns the flavor default for a given cluster type. * This may be "default" - which is an invalid value - but never null. */ public String flavor(ClusterSpec.Type clusterType) { switch (clusterType) { case admin: return adminFlavor.orElse(defaultFlavor); case container: return containerFlavor.orElse(defaultFlavor); case content: return contentFlavor.orElse(defaultFlavor); default: return defaultFlavor; } } } }
Nope.
public static Zone defaultZone() { return new Zone(SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); }
return new Zone(SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName());
public static Zone defaultZone() { return new Zone(SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); }
class Zone { private final SystemName systemName; private final FlavorDefaults flavorDefaults; private final Optional<NodeFlavors> nodeFlavors; private final ZoneId id; @Inject public Zone(ConfigserverConfig configserverConfig, NodeFlavors nodeFlavors) { this(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region()), new FlavorDefaults(configserverConfig), nodeFlavors); } /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(systemName, environment, region, new FlavorDefaults("default"), null); } private Zone(SystemName systemName, Environment environment, RegionName region, FlavorDefaults flavorDefaults, NodeFlavors nodeFlavors) { this.id = new ZoneId(environment, region); this.flavorDefaults = flavorDefaults; this.systemName = systemName; this.nodeFlavors = Optional.ofNullable(nodeFlavors); } /** Returns the id of this */ public ZoneId id() { return id; } /** Returns the current environment */ public Environment environment() { return id.environment(); } /** Returns the current region */ public RegionName region() { return id.region(); } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the default hardware flavor to assign in this zone */ public String defaultFlavor(ClusterSpec.Type clusterType) { return flavorDefaults.flavor(clusterType); } /** Returns all available node flavors for the zone, or empty if not set for this Zone. */ public Optional<NodeFlavors> nodeFlavors() { return nodeFlavors; } /** Do not use */ public String toString() { return id.toString(); } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! 
(o instanceof Zone)) return false; return Objects.equals(id, ((Zone) o).id); } public int hashCode() { return id.hashCode(); } private static class FlavorDefaults { /** The default default flavor */ private final String defaultFlavor; /** The default flavor for each cluster type, or empty to use defaultFlavor */ private final Optional<String> adminFlavor; private final Optional<String> containerFlavor; private final Optional<String> contentFlavor; /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor) { this(defaultFlavor, Optional.empty(), Optional.empty(), Optional.empty()); } /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor, Optional<String> adminFlavor, Optional<String> containerFlavor, Optional<String> contentFlavor) { this.defaultFlavor = defaultFlavor; this.adminFlavor = adminFlavor; this.containerFlavor = containerFlavor; this.contentFlavor = contentFlavor; } public FlavorDefaults(ConfigserverConfig config) { this(config.defaultFlavor(), emptyIfDefault(config.defaultAdminFlavor()), emptyIfDefault(config.defaultContainerFlavor()), emptyIfDefault(config.defaultContentFlavor())); } /** Map "default" to empty - this config cannot have missing values due to the need for supporting non-hosted */ private static Optional<String> emptyIfDefault(String value) { if (Strings.isNullOrEmpty(value)) return Optional.empty(); if (value.equals("default")) return Optional.empty(); return Optional.of(value); } /** * Returns the flavor default for a given cluster type. * This may be "default" - which is an invalid value - but never null. */ public String flavor(ClusterSpec.Type clusterType) { switch (clusterType) { case admin: return adminFlavor.orElse(defaultFlavor); case container: return containerFlavor.orElse(defaultFlavor); case content: return contentFlavor.orElse(defaultFlavor); default: return defaultFlavor; } } } }
class Zone { private final SystemName systemName; private final FlavorDefaults flavorDefaults; private final Optional<NodeFlavors> nodeFlavors; private final ZoneId id; @Inject public Zone(ConfigserverConfig configserverConfig, NodeFlavors nodeFlavors) { this(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region()), new FlavorDefaults(configserverConfig), nodeFlavors); } /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(systemName, environment, region, new FlavorDefaults("default"), null); } private Zone(SystemName systemName, Environment environment, RegionName region, FlavorDefaults flavorDefaults, NodeFlavors nodeFlavors) { this.id = ZoneId.from(environment, region); this.flavorDefaults = flavorDefaults; this.systemName = systemName; this.nodeFlavors = Optional.ofNullable(nodeFlavors); } /** Returns the id of this */ public ZoneId id() { return id; } /** Returns the current environment */ public Environment environment() { return id.environment(); } /** Returns the current region */ public RegionName region() { return id.region(); } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the default hardware flavor to assign in this zone */ public String defaultFlavor(ClusterSpec.Type clusterType) { return flavorDefaults.flavor(clusterType); } /** Returns all available node flavors for the zone, or empty if not set for this Zone. */ public Optional<NodeFlavors> nodeFlavors() { return nodeFlavors; } /** Do not use */ @Override public String toString() { return id.toString(); } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! 
(o instanceof Zone)) return false; return Objects.equals(id, ((Zone) o).id); } @Override public int hashCode() { return id.hashCode(); } private static class FlavorDefaults { /** The default default flavor */ private final String defaultFlavor; /** The default flavor for each cluster type, or empty to use defaultFlavor */ private final Optional<String> adminFlavor; private final Optional<String> containerFlavor; private final Optional<String> contentFlavor; /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor) { this(defaultFlavor, Optional.empty(), Optional.empty(), Optional.empty()); } /** Creates this with a default flavor and all cluster type flavors empty */ public FlavorDefaults(String defaultFlavor, Optional<String> adminFlavor, Optional<String> containerFlavor, Optional<String> contentFlavor) { this.defaultFlavor = defaultFlavor; this.adminFlavor = adminFlavor; this.containerFlavor = containerFlavor; this.contentFlavor = contentFlavor; } public FlavorDefaults(ConfigserverConfig config) { this(config.defaultFlavor(), emptyIfDefault(config.defaultAdminFlavor()), emptyIfDefault(config.defaultContainerFlavor()), emptyIfDefault(config.defaultContentFlavor())); } /** Map "default" to empty - this config cannot have missing values due to the need for supporting non-hosted */ private static Optional<String> emptyIfDefault(String value) { if (Strings.isNullOrEmpty(value)) return Optional.empty(); if (value.equals("default")) return Optional.empty(); return Optional.of(value); } /** * Returns the flavor default for a given cluster type. * This may be "default" - which is an invalid value - but never null. */ public String flavor(ClusterSpec.Type clusterType) { switch (clusterType) { case admin: return adminFlavor.orElse(defaultFlavor); case container: return containerFlavor.orElse(defaultFlavor); case content: return contentFlavor.orElse(defaultFlavor); default: return defaultFlavor; } } } }
X509 is not supported, use SunX509 instead
private static KeyManager[] createKeyManagersWithServiceCertificate(ZtsClient ztsClient) { try { AthenzIdentityCertificate identityCertificate = ztsClient.getIdentityCertificate(); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry("athenz-controller-key", identityCertificate.getPrivateKey(), new char[0], new Certificate[]{identityCertificate.getCertificate()}); KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("X509"); keyManagerFactory.init(keyStore, new char[0]); return keyManagerFactory.getKeyManagers(); } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException | CertificateException | IOException e) { throw new RuntimeException(e); } }
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("X509");
private static KeyManager[] createKeyManagersWithServiceCertificate(ZtsClient ztsClient) { try { AthenzIdentityCertificate identityCertificate = ztsClient.getIdentityCertificate(); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry("athenz-controller-key", identityCertificate.getPrivateKey(), new char[0], new Certificate[]{identityCertificate.getCertificate()}); KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); keyManagerFactory.init(keyStore, new char[0]); return keyManagerFactory.getKeyManagers(); } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException | CertificateException | IOException e) { throw new RuntimeException(e); } }
class AthenzSslContextProviderImpl implements AthenzSslContextProvider { private final AthenzClientFactory clientFactory; private final AthenzConfig config; @Inject public AthenzSslContextProviderImpl(AthenzClientFactory clientFactory, AthenzConfig config) { this.clientFactory = clientFactory; this.config = config; } @Override public SSLContext get() { return createSslContext(); } private SSLContext createSslContext() { try { SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); sslContext.init(createKeyManagersWithServiceCertificate(clientFactory.createZtsClientWithServicePrincipal()), createTrustManagersWithAthenzCa(config), null); return sslContext; } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new RuntimeException(e); } } private static TrustManager[] createTrustManagersWithAthenzCa(AthenzConfig config) { try { KeyStore trustStore = KeyStore.getInstance("JKS"); try (FileInputStream in = new FileInputStream(config.athenzCaTrustStore())) { trustStore.load(in, "changeit".toCharArray()); } TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance("X509"); trustManagerFactory.init(trustStore); return trustManagerFactory.getTrustManagers(); } catch (CertificateException | IOException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); } } }
class AthenzSslContextProviderImpl implements AthenzSslContextProvider { private final AthenzClientFactory clientFactory; private final AthenzConfig config; @Inject public AthenzSslContextProviderImpl(AthenzClientFactory clientFactory, AthenzConfig config) { this.clientFactory = clientFactory; this.config = config; } @Override public SSLContext get() { return createSslContext(); } private SSLContext createSslContext() { try { SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); sslContext.init(createKeyManagersWithServiceCertificate(clientFactory.createZtsClientWithServicePrincipal()), createTrustManagersWithAthenzCa(config), null); return sslContext; } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new RuntimeException(e); } } private static TrustManager[] createTrustManagersWithAthenzCa(AthenzConfig config) { try { KeyStore trustStore = KeyStore.getInstance("JKS"); try (FileInputStream in = new FileInputStream(config.athenzCaTrustStore())) { trustStore.load(in, "changeit".toCharArray()); } TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); trustManagerFactory.init(trustStore); return trustManagerFactory.getTrustManagers(); } catch (CertificateException | IOException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); } } }
SunX509
private static TrustManager[] createTrustManagersWithAthenzCa(AthenzConfig config) { try { KeyStore trustStore = KeyStore.getInstance("JKS"); try (FileInputStream in = new FileInputStream(config.athenzCaTrustStore())) { trustStore.load(in, "changeit".toCharArray()); } TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance("X509"); trustManagerFactory.init(trustStore); return trustManagerFactory.getTrustManagers(); } catch (CertificateException | IOException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); } }
TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance("X509");
private static TrustManager[] createTrustManagersWithAthenzCa(AthenzConfig config) { try { KeyStore trustStore = KeyStore.getInstance("JKS"); try (FileInputStream in = new FileInputStream(config.athenzCaTrustStore())) { trustStore.load(in, "changeit".toCharArray()); } TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); trustManagerFactory.init(trustStore); return trustManagerFactory.getTrustManagers(); } catch (CertificateException | IOException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); } }
class AthenzSslContextProviderImpl implements AthenzSslContextProvider { private final AthenzClientFactory clientFactory; private final AthenzConfig config; @Inject public AthenzSslContextProviderImpl(AthenzClientFactory clientFactory, AthenzConfig config) { this.clientFactory = clientFactory; this.config = config; } @Override public SSLContext get() { return createSslContext(); } private SSLContext createSslContext() { try { SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); sslContext.init(createKeyManagersWithServiceCertificate(clientFactory.createZtsClientWithServicePrincipal()), createTrustManagersWithAthenzCa(config), null); return sslContext; } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new RuntimeException(e); } } private static KeyManager[] createKeyManagersWithServiceCertificate(ZtsClient ztsClient) { try { AthenzIdentityCertificate identityCertificate = ztsClient.getIdentityCertificate(); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry("athenz-controller-key", identityCertificate.getPrivateKey(), new char[0], new Certificate[]{identityCertificate.getCertificate()}); KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("X509"); keyManagerFactory.init(keyStore, new char[0]); return keyManagerFactory.getKeyManagers(); } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException | CertificateException | IOException e) { throw new RuntimeException(e); } } }
class AthenzSslContextProviderImpl implements AthenzSslContextProvider { private final AthenzClientFactory clientFactory; private final AthenzConfig config; @Inject public AthenzSslContextProviderImpl(AthenzClientFactory clientFactory, AthenzConfig config) { this.clientFactory = clientFactory; this.config = config; } @Override public SSLContext get() { return createSslContext(); } private SSLContext createSslContext() { try { SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); sslContext.init(createKeyManagersWithServiceCertificate(clientFactory.createZtsClientWithServicePrincipal()), createTrustManagersWithAthenzCa(config), null); return sslContext; } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new RuntimeException(e); } } private static KeyManager[] createKeyManagersWithServiceCertificate(ZtsClient ztsClient) { try { AthenzIdentityCertificate identityCertificate = ztsClient.getIdentityCertificate(); KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null); keyStore.setKeyEntry("athenz-controller-key", identityCertificate.getPrivateKey(), new char[0], new Certificate[]{identityCertificate.getCertificate()}); KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); keyManagerFactory.init(keyStore, new char[0]); return keyManagerFactory.getKeyManagers(); } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException | CertificateException | IOException e) { throw new RuntimeException(e); } } }
I think this clause should be moved below the loop.
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); }
if (tenant.isAthensTenant()) {
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all 
applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr"))) throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment"); try (Lock lock = lock(id)) { com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! 
tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (tenant.get().isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); if (tenant.get().isAthensTenant()) { ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()); try { zmsClient.deleteApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } catch (ZmsException ignored) { } zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } LockedApplication application = new LockedApplication(new Application(id), lock); store(application); log.info("Created " + application); return application; } } /** Deploys an application. If the application does not exist it is created. */ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, ApplicationPackage applicationPackage, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(new Application(applicationId), lock)); Version version; if (options.deployCurrentVersion) version = application.versionIn(zone, controller); else if (canDeployDirectlyTo(zone, options)) version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); else if ( ! application.deploying().isPresent() && ! 
zone.environment().isManuallyDeployed()) return unexpectedDeployment(applicationId, zone, applicationPackage); else version = application.deployVersionIn(zone, controller); Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone); ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); if ( ! options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null) application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value()); if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange) application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision))); if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get()); application = application.withJobTriggering(jobType.get(), application.deploying(), triggering.at(), version, Optional.of(revision), triggering.reason()); } application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if ( ! 
canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying())) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.deploying().get() + " is not tested"); Deployment existingDeployment = application.deployments().get(zone); if (existingDeployment != null && existingDeployment.version().isAfter(version)) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } Optional<Rotation> rotation; try (RotationLock rotationLock = rotationRepository.lock()) { rotation = getRotation(application, zone, rotationLock); if (rotation.isPresent()) { application = application.with(rotation.get().id()); store(application); registerRotationInDns(rotation.get(), application.rotation().get().dnsName()); } } Set<String> cnames = application.rotation() .map(ApplicationRotation::dnsName) .map(Collections::singleton) .orElseGet(Collections::emptySet); Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations = rotation .map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation( new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId( r.id().asString()), r.name())) .map(Collections::singleton) .orElseGet(Collections::emptySet); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, revision, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse()); } } private ActivateResult unexpectedDeployment(ApplicationId applicationId, 
ZoneId zone, ApplicationPackage applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! 
job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. */ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> screwDriverBuildJob) { if ( ! 
screwDriverBuildJob.isPresent()) return ApplicationRevision.from(applicationPackage.hash()); GitRevision gitRevision = screwDriverBuildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationRevision.from(applicationPackage.hash()); return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if (!record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Get an available rotation, if deploying to a production zone and a service ID is specified */ private Optional<Rotation> getRotation(Application application, ZoneId zone, RotationLock lock) { if (zone.environment() != Environment.prod || !application.deploymentSpec().globalServiceId().isPresent()) { return Optional.empty(); } return Optional.of(rotationRepository.getRotation(application, lock)); } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new 
ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! 
DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ public void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! 
controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent()) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
/**
 * Controls the applications known to this controller: creation, deployment,
 * rotation assignment, deactivation and persistence.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store every application on startup (presumably to migrate any stored
        // format changes — TODO confirm)
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // Literal replace, not replaceAll: replaceAll takes a regex, in which the
                // unescaped dots would match any character
                String canonicalEndpoint = serviceEndpoint.replace(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host seen with both a global and a non-global endpoint determines the answer
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr")))
            throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment");
        try (Lock lock = lock(id)) {
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
            if ( ! tenant.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            if (get(dashToUnderscore(id)).isPresent()) // the underscore/dash variants collide in some backends — TODO confirm
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            if (tenant.get().isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            if (tenant.get().isAthensTenant()) {
                ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
                try {
                    // Best-effort removal of any stale ZMS entry before (re)adding it
                    zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
                                                new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
                } catch (ZmsException ignored) {
                    // The application may simply not exist in ZMS yet
                }
                zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                         new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            LockedApplication application = new LockedApplication(new Application(id), lock);
            store(application);
            log.info("Created " + application);
            return application;
        }
    }

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                            ApplicationPackage applicationPackage, DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(app -> new LockedApplication(app, lock))
                    .orElseGet(() -> new LockedApplication(new Application(applicationId), lock));

            // Decide which platform version to deploy
            Version version;
            if (options.deployCurrentVersion)
                version = application.versionIn(zone, controller);
            else if (canDeployDirectlyTo(zone, options))
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed())
                return unexpectedDeployment(applicationId, zone, applicationPackage);
            else
                version = application.deployVersionIn(zone, controller);

            Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone);
            ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);

            if ( ! options.deployCurrentVersion) {
                // Add any information from this deployment to the application
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null)
                    application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value());
                if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange)
                    application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision)));
                if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) {
                    // Record the triggering of the job corresponding to this zone
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get());
                    application = application.withJobTriggering(jobType.get(),
                                                                application.deploying(),
                                                                triggering.at(),
                                                                version,
                                                                Optional.of(revision),
                                                                triggering.reason());
                }

                // Remove deployments and jobs no longer covered by the deployment spec
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);

                store(application); // persist what we know before attempting the deployment itself
            }

            if ( ! canDeployDirectlyTo(zone, options)) {
                // Reject untested changes and downgrades
                if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.deploying().get() + " is not tested");
                Deployment existingDeployment = application.deployments().get(zone);
                if (existingDeployment != null && existingDeployment.version().isAfter(version))
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
            }

            // Assign a rotation, if one applies, and register it in DNS
            Optional<Rotation> rotation;
            try (RotationLock rotationLock = rotationRepository.lock()) {
                rotation = getRotation(application, zone, rotationLock);
                if (rotation.isPresent()) {
                    application = application.with(rotation.get().id());
                    store(application); // persist the assigned rotation before the deployment itself
                    registerRotationInDns(rotation.get(), application.rotation().get().dnsName());
                }
            }

            Set<String> cnames = application.rotation()
                                            .map(ApplicationRotation::dnsName)
                                            .map(Collections::singleton)
                                            .orElseGet(Collections::emptySet);
            Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations = rotation
                    .map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation(
                            new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId(r.id().asString()),
                            r.name()))
                    .map(Collections::singleton)
                    .orElseGet(Collections::emptySet);

            // Carry out the deployment: prepare, then activate
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, revision, version, clock.instant());
            store(application);

            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse());
        }
    }

    /** Returns an ActivateResult carrying only a warning log entry saying the deployment was ignored */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone,
                                                ApplicationPackage applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse);
    }

    /**
     * Deactivates any production deployments in zones no longer listed in the deployment spec,
     * provided the deploymentRemoval validation override allows it.
     *
     * @throws IllegalArgumentException if deployments would be removed without an override
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());

        if (deploymentsToRemove.isEmpty()) return application;

        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");

        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes status for production jobs whose zone is no longer listed in the deployment spec */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<ZoneId> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** Returns a placeholder job run (id -1, empty revision and reason) for the given version */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant());
    }

    /** Returns a copy of the given options with the deploy version set */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /** Creates the application revision, including the source revision when the build job provides one */
    private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                             Optional<ScrewdriverBuildJob> screwDriverBuildJob) {
        if ( ! screwDriverBuildJob.isPresent())
            return ApplicationRevision.from(applicationPackage.hash());
        GitRevision gitRevision = screwDriverBuildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationRevision.from(applicationPackage.hash());
        return ApplicationRevision.from(applicationPackage.hash(),
                                        new SourceRevision(gitRevision.repository.id(),
                                                           gitRevision.branch.id(),
                                                           gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation */
    private void registerRotationInDns(Rotation rotation, String dnsName) {
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
            RecordData rotationName = RecordData.fqdn(rotation.name());
            if (record.isPresent()) {
                // Update the existing CNAME only if it points somewhere else
                if ( ! record.get().data().equals(rotationName)) {
                    nameService.updateRecord(record.get().id(), rotationName);
                    log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
                             + "' -> '" + rotation.name() + "'");
                }
            } else {
                RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
                log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                         + rotation.name() + "'");
            }
        } catch (RuntimeException e) {
            // DNS registration is best effort; deployment proceeds without it
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
    }

    /** Get an available rotation, if deploying to a production zone and a service ID is specified */
    private Optional<Rotation> getRotation(Application application, ZoneId zone, RotationLock lock) {
        if (zone.environment() != Environment.prod || ! application.deploymentSpec().globalServiceId().isPresent()) {
            return Optional.empty();
        }
        return Optional.of(rotationRepository.getRotation(application, lock));
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                                   + Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the given application. All known instances of the application will be deleted,
     * including PR instances.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if no instances of the application exist
     */

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards the given job completion report to the deployment trigger, unless the application is unknown */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, ZoneId zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        if (requireThatDeploymentHasExpired && deployment.isPresent()
            && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;
        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // Already gone: the deployment is removed from our state below regardless
        }
        return application.withoutDeploymentIn(zone);
    }

    /** Returns the deployment trigger owned by this */
    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with every dash in the application name replaced by an underscore */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        // Literal replace: no regex semantics needed for a plain dash
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replace("-", "_"),
                                  id.instance().value());
    }

    /** Returns the config server client used by this */
    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent()
               || options.screwdriverBuildJob.get().screwdriverId == null
               || zone.environment().isManuallyDeployed();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    public void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                      .filter(zone -> zone.environment() == Environment.prod)
                      .forEach(zone -> {
                          if ( ! controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent())
                              throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                      });
    }

    /** Returns the rotation repository owned by this */
    public RotationRepository rotationRepository() { return rotationRepository; }

}
Done. It was easier to do it in the loop, but only for the default instance.
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); }
if (tenant.isAthensTenant()) {
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all 
applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr"))) throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment"); try (Lock lock = lock(id)) { com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! 
tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (tenant.get().isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); if (tenant.get().isAthensTenant()) { ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()); try { zmsClient.deleteApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } catch (ZmsException ignored) { } zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } LockedApplication application = new LockedApplication(new Application(id), lock); store(application); log.info("Created " + application); return application; } } /** Deploys an application. If the application does not exist it is created. */ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, ApplicationPackage applicationPackage, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(new Application(applicationId), lock)); Version version; if (options.deployCurrentVersion) version = application.versionIn(zone, controller); else if (canDeployDirectlyTo(zone, options)) version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); else if ( ! application.deploying().isPresent() && ! 
zone.environment().isManuallyDeployed()) return unexpectedDeployment(applicationId, zone, applicationPackage); else version = application.deployVersionIn(zone, controller); Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone); ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); if ( ! options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null) application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value()); if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange) application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision))); if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get()); application = application.withJobTriggering(jobType.get(), application.deploying(), triggering.at(), version, Optional.of(revision), triggering.reason()); } application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if ( ! 
canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying())) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.deploying().get() + " is not tested"); Deployment existingDeployment = application.deployments().get(zone); if (existingDeployment != null && existingDeployment.version().isAfter(version)) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } Optional<Rotation> rotation; try (RotationLock rotationLock = rotationRepository.lock()) { rotation = getRotation(application, zone, rotationLock); if (rotation.isPresent()) { application = application.with(rotation.get().id()); store(application); registerRotationInDns(rotation.get(), application.rotation().get().dnsName()); } } Set<String> cnames = application.rotation() .map(ApplicationRotation::dnsName) .map(Collections::singleton) .orElseGet(Collections::emptySet); Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations = rotation .map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation( new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId( r.id().asString()), r.name())) .map(Collections::singleton) .orElseGet(Collections::emptySet); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, revision, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse()); } } private ActivateResult unexpectedDeployment(ApplicationId applicationId, 
ZoneId zone, ApplicationPackage applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! 
job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. */ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> screwDriverBuildJob) { if ( ! 
screwDriverBuildJob.isPresent()) return ApplicationRevision.from(applicationPackage.hash()); GitRevision gitRevision = screwDriverBuildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationRevision.from(applicationPackage.hash()); return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if (!record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Get an available rotation, if deploying to a production zone and a service ID is specified */ private Optional<Rotation> getRotation(Application application, ZoneId zone, RotationLock lock) { if (zone.environment() != Environment.prod || !application.deploymentSpec().globalServiceId().isPresent()) { return Optional.empty(); } return Optional.of(rotationRepository.getRotation(application, lock)); } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new 
ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! 
DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ public void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! 
controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent()) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all 
applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr"))) throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment"); try (Lock lock = lock(id)) { com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! 
tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (tenant.get().isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); if (tenant.get().isAthensTenant()) { ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()); try { zmsClient.deleteApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } catch (ZmsException ignored) { } zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } LockedApplication application = new LockedApplication(new Application(id), lock); store(application); log.info("Created " + application); return application; } } /** Deploys an application. If the application does not exist it is created. */ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, ApplicationPackage applicationPackage, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(new Application(applicationId), lock)); Version version; if (options.deployCurrentVersion) version = application.versionIn(zone, controller); else if (canDeployDirectlyTo(zone, options)) version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); else if ( ! application.deploying().isPresent() && ! 
zone.environment().isManuallyDeployed()) return unexpectedDeployment(applicationId, zone, applicationPackage); else version = application.deployVersionIn(zone, controller); Optional<DeploymentJobs.JobType> jobType = DeploymentJobs.JobType.from(controller.system(), zone); ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); if ( ! options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null) application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value()); if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange) application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision))); if ( ! canDeployDirectlyTo(zone, options) && jobType.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, jobType.get()); application = application.withJobTriggering(jobType.get(), application.deploying(), triggering.at(), version, Optional.of(revision), triggering.reason()); } application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if ( ! 
canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying())) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.deploying().get() + " is not tested"); Deployment existingDeployment = application.deployments().get(zone); if (existingDeployment != null && existingDeployment.version().isAfter(version)) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } Optional<Rotation> rotation; try (RotationLock rotationLock = rotationRepository.lock()) { rotation = getRotation(application, zone, rotationLock); if (rotation.isPresent()) { application = application.with(rotation.get().id()); store(application); registerRotationInDns(rotation.get(), application.rotation().get().dnsName()); } } Set<String> cnames = application.rotation() .map(ApplicationRotation::dnsName) .map(Collections::singleton) .orElseGet(Collections::emptySet); Set<com.yahoo.vespa.hosted.controller.api.rotation.Rotation> rotations = rotation .map(r -> new com.yahoo.vespa.hosted.controller.api.rotation.Rotation( new com.yahoo.vespa.hosted.controller.api.identifiers.RotationId( r.id().asString()), r.name())) .map(Collections::singleton) .orElseGet(Collections::emptySet); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotations, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, revision, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse()); } } private ActivateResult unexpectedDeployment(ApplicationId applicationId, 
ZoneId zone, ApplicationPackage applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! 
job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. */ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, Optional.empty(), false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> screwDriverBuildJob) { if ( ! 
screwDriverBuildJob.isPresent()) return ApplicationRevision.from(applicationPackage.hash()); GitRevision gitRevision = screwDriverBuildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationRevision.from(applicationPackage.hash()); return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if (!record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Get an available rotation, if deploying to a production zone and a service ID is specified */ private Optional<Rotation> getRotation(Application application, ZoneId zone, RotationLock lock) { if (zone.environment() != Environment.prod || !application.deploymentSpec().globalServiceId().isPresent()) { return Optional.empty(); } return Optional.of(rotationRepository.getRotation(application, lock)); } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new 
ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! 
DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ public void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! 
controller.zoneRegistry().getZone(zone.environment(), zone.region().orElse(null)).isPresent()) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
You should add an `if (indexForThisConfigServer != 0)` guard before the `Collections.rotate(...)` call, so the rotation is skipped when this server is already first in the list (rotating by the full list length is a no-op anyway).
/**
 * Builds the list of ZooKeeper "host:port" connection strings from config, rotated so that
 * the server running on this host (if present in the config) comes first. The relative
 * wrap-around order of the remaining servers is preserved.
 *
 * @param config the config server configuration listing all ZooKeeper servers
 * @return the ordered list of "host:port" strings, local server (if any) first
 */
private static List<String> createAndOrderServerList(ConfigserverConfig config) {
    String hostName = HostName.getLocalhost();
    int configServerCount = config.zookeeperserver().size();
    List<String> servers = new ArrayList<>(configServerCount);
    int indexForThisConfigServer = 0; // stays 0 if this host is not among the servers
    for (int i = 0; i < configServerCount; i++) {
        ConfigserverConfig.Zookeeperserver server = config.zookeeperserver(i);
        servers.add(server.hostname() + ":" + server.port());
        if (server.hostname().equals(hostName))
            indexForThisConfigServer = i;
    }
    // Skip the pointless full-length rotation when this server is already first
    // (also covers the case where the local host is not in the list at all).
    if (indexForThisConfigServer != 0)
        Collections.rotate(servers, configServerCount - indexForThisConfigServer);
    return servers;
}
Collections.rotate(servers, configServerCount - indexForThisConfigServer);
/**
 * Returns the configured ZooKeeper endpoints as "host:port" strings, reordered so the
 * server hosted on this machine (when it appears in the config) is listed first.
 */
private static List<String> createAndOrderServerList(ConfigserverConfig config) {
    String localHostName = HostName.getLocalhost();
    int serverCount = config.zookeeperserver().size();
    List<String> endpoints = new ArrayList<>();
    int localIndex = 0; // remains 0 when this machine is not one of the servers
    for (int i = 0; i < serverCount; i++) {
        ConfigserverConfig.Zookeeperserver zkServer = config.zookeeperserver(i);
        endpoints.add(zkServer.hostname() + ":" + zkServer.port());
        if (zkServer.hostname().equals(localHostName))
            localIndex = i;
    }
    // Only rotate when the local server is not already at the front.
    if (localIndex != 0)
        Collections.rotate(endpoints, serverCount - localIndex);
    return endpoints;
}
class Curator implements AutoCloseable { private static final long UNKNOWN_HOST_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private static final int ZK_SESSION_TIMEOUT = 30000; private static final int ZK_CONNECTION_TIMEOUT = 30000; private static final int baseSleepTime = 1000; private static final int maxRetries = 10; private final CuratorFramework curatorFramework; protected final RetryPolicy retryPolicy; private final String connectionSpec; private final int serverCount; /** Creates a curator instance from a comma-separated string of ZooKeeper host:port strings */ public static Curator create(String connectionSpec) { return new Curator(connectionSpec); } @Inject public Curator(ConfigserverConfig configserverConfig, ZooKeeperServer server) { this(createConnectionSpec(configserverConfig)); } private static String createConnectionSpec(ConfigserverConfig config) { List<String> servers = createAndOrderServerList(config); return String.join(",", servers); } /** * Create a curator instance which connects to the zookeeper servers given by a connection spec * on the format "hostname1:port,hostname2:port" ... 
*/ public Curator(String connectionSpec) { Objects.requireNonNull(connectionSpec, "The curator connection spec cannot be null"); this.connectionSpec = connectionSpec; this.serverCount = connectionSpec.split(",").length; validateConnectionSpec(connectionSpec); retryPolicy = new ExponentialBackoffRetry(baseSleepTime, maxRetries); curatorFramework = CuratorFrameworkFactory.builder() .retryPolicy(retryPolicy) .sessionTimeoutMs(ZK_SESSION_TIMEOUT) .connectionTimeoutMs(ZK_CONNECTION_TIMEOUT) .connectString(connectionSpec) .zookeeperFactory(new DNSResolvingFixerZooKeeperFactory(UNKNOWN_HOST_TIMEOUT_MILLIS)) .build(); addFakeListener(); curatorFramework.start(); } protected Curator() { this.connectionSpec = ""; this.serverCount = 0; retryPolicy = new ExponentialBackoffRetry(baseSleepTime, maxRetries); curatorFramework = null; } private static void validateConnectionSpec(String connectionSpec) { if (connectionSpec == null || connectionSpec.isEmpty()) throw new IllegalArgumentException(String.format("Connections spec '%s' is not valid", connectionSpec)); } /** Returns the number of zooKeeper servers in this cluster */ public int serverCount() { return serverCount; } /** * Returns the servers in this cluster as a comma-separated list of host:port strings. 
* This may be empty but never null */ public String connectionSpec() { return connectionSpec; } /** For internal use; prefer creating a {@link CuratorCounter} */ public DistributedAtomicLong createAtomicCounter(String path) { return new DistributedAtomicLong(curatorFramework, path, new ExponentialBackoffRetry(baseSleepTime, maxRetries)); } /** For internal use; prefer creating a {@link com.yahoo.vespa.curator.recipes.CuratorLock} */ public InterProcessLock createMutex(String lockPath) { return new InterProcessMutex(curatorFramework, lockPath); } private void addFakeListener() { curatorFramework.getConnectionStateListenable().addListener(new ConnectionStateListener() { @Override public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) { } }); } public CompletionWaiter getCompletionWaiter(Path waiterPath, int numMembers, String id) { return CuratorCompletionWaiter.create(curatorFramework, waiterPath, numMembers, id); } public CompletionWaiter createCompletionWaiter(Path parentPath, String waiterNode, int numMembers, String id) { return CuratorCompletionWaiter.createAndInitialize(this, parentPath, waiterNode, numMembers, id); } /** Creates a listenable cache which keeps in sync with changes to all the immediate children of a path */ public DirectoryCache createDirectoryCache(String path, boolean cacheData, boolean dataIsCompressed, ExecutorService executorService) { return new PathChildrenCacheWrapper(framework(), path, cacheData, dataIsCompressed, executorService); } /** Creates a listenable cache which keeps in sync with changes to a given node */ public FileCache createFileCache(String path, boolean dataIsCompressed) { return new NodeCacheWrapper(framework(), path, dataIsCompressed); } /** A convenience method which returns whether the given path exists */ public boolean exists(Path path) { try { return framework().checkExists().forPath(path.getAbsolute()) != null; } catch (Exception e) { throw new RuntimeException("Could not 
check existence of " + path.getAbsolute(), e); } } /** * A convenience method which sets some content at a path. * If the path and any of its parents does not exists they are created. */ public void set(Path path, byte[] data) { String absolutePath = path.getAbsolute(); try { if ( ! exists(path)) framework().create().creatingParentsIfNeeded().forPath(absolutePath, data); else framework().setData().forPath(absolutePath, data); } catch (Exception e) { throw new RuntimeException("Could not set data at " + absolutePath, e); } } /** * Creates an empty node at a path, creating any parents as necessary. * If the node already exists nothing is done. */ public void create(Path path) { if (exists(path)) return; String absolutePath = path.getAbsolute(); try { framework().create().creatingParentsIfNeeded().forPath(absolutePath, new byte[0]); } catch (org.apache.zookeeper.KeeperException.NodeExistsException e) { } catch (Exception e) { throw new RuntimeException("Could not create " + absolutePath, e); } } /** * Creates all the given paths in a single transaction. Any paths which already exists are ignored. */ public void createAtomically(Path... paths) { try { CuratorTransaction transaction = framework().inTransaction(); for (Path path : paths) { if ( ! exists(path)) { transaction = transaction.create().forPath(path.getAbsolute(), new byte[0]).and(); } } ((CuratorTransactionFinal)transaction).commit(); } catch (Exception e) { throw new RuntimeException("Could not create " + Arrays.toString(paths), e); } } /** * Deletes the given path and any children it may have. * If the path does not exists nothing is done. */ public void delete(Path path) { if ( ! exists(path)) return; try { framework().delete().guaranteed().deletingChildrenIfNeeded().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not delete " + path.getAbsolute(), e); } } /** * Returns the names of the children at the given path. 
* If the path does not exist or have no children an empty list (never null) is returned. */ public List<String> getChildren(Path path) { if ( ! exists(path)) return Collections.emptyList(); try { return framework().getChildren().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not get children of " + path.getAbsolute(), e); } } /** * Returns the data at the given path, which may be a zero-length buffer if the node exists but have no data. * Empty is returned if the path does not exist. */ public Optional<byte[]> getData(Path path) { if ( ! exists(path)) return Optional.empty(); try { return Optional.of(framework().getData().forPath(path.getAbsolute())); } catch (Exception e) { throw new RuntimeException("Could not get data at " + path.getAbsolute(), e); } } /** Returns the curator framework API */ public CuratorFramework framework() { return curatorFramework; } @Override public void close() { curatorFramework.close(); } /** * Interface for waiting for completion of an operation */ public interface CompletionWaiter { /** * Awaits completion of something. Blocks until an implementation defined * condition has been met. * * @param timeout timeout for blocking await call. * @throws CompletionTimeoutException if timeout is reached without completion. */ void awaitCompletion(Duration timeout); /** * Notify completion of something. This method does not block and is called by clients * that want to notify the completion waiter that something has completed. */ void notifyCompletion(); } /** * A listenable cache of all the immediate children of a curator path. * This wraps the Curator PathChildrenCache recipe to allow us to mock it. */ public interface DirectoryCache { void start(); void addListener(PathChildrenCacheListener listener); List<ChildData> getCurrentData(); void close(); } /** * A listenable cache of the content of a single curator path. * This wraps the Curator NodeCache recipe to allow us to mock it. 
*/ public interface FileCache { void start(); void addListener(NodeCacheListener listener); ChildData getCurrentData(); void close(); } }
class Curator implements AutoCloseable { private static final long UNKNOWN_HOST_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private static final int ZK_SESSION_TIMEOUT = 30000; private static final int ZK_CONNECTION_TIMEOUT = 30000; private static final int baseSleepTime = 1000; private static final int maxRetries = 10; private final CuratorFramework curatorFramework; protected final RetryPolicy retryPolicy; private final String connectionSpec; private final int serverCount; /** Creates a curator instance from a comma-separated string of ZooKeeper host:port strings */ public static Curator create(String connectionSpec) { return new Curator(connectionSpec); } @Inject public Curator(ConfigserverConfig configserverConfig, ZooKeeperServer server) { this(createConnectionSpec(configserverConfig)); } private static String createConnectionSpec(ConfigserverConfig config) { List<String> servers = createAndOrderServerList(config); return String.join(",", servers); } /** * Create a curator instance which connects to the zookeeper servers given by a connection spec * on the format "hostname1:port,hostname2:port" ... 
*/ public Curator(String connectionSpec) { Objects.requireNonNull(connectionSpec, "The curator connection spec cannot be null"); this.connectionSpec = connectionSpec; this.serverCount = connectionSpec.split(",").length; validateConnectionSpec(connectionSpec); retryPolicy = new ExponentialBackoffRetry(baseSleepTime, maxRetries); curatorFramework = CuratorFrameworkFactory.builder() .retryPolicy(retryPolicy) .sessionTimeoutMs(ZK_SESSION_TIMEOUT) .connectionTimeoutMs(ZK_CONNECTION_TIMEOUT) .connectString(connectionSpec) .zookeeperFactory(new DNSResolvingFixerZooKeeperFactory(UNKNOWN_HOST_TIMEOUT_MILLIS)) .build(); addFakeListener(); curatorFramework.start(); } protected Curator() { this.connectionSpec = ""; this.serverCount = 0; retryPolicy = new ExponentialBackoffRetry(baseSleepTime, maxRetries); curatorFramework = null; } private static void validateConnectionSpec(String connectionSpec) { if (connectionSpec == null || connectionSpec.isEmpty()) throw new IllegalArgumentException(String.format("Connections spec '%s' is not valid", connectionSpec)); } /** Returns the number of zooKeeper servers in this cluster */ public int serverCount() { return serverCount; } /** * Returns the servers in this cluster as a comma-separated list of host:port strings. 
* This may be empty but never null */ public String connectionSpec() { return connectionSpec; } /** For internal use; prefer creating a {@link CuratorCounter} */ public DistributedAtomicLong createAtomicCounter(String path) { return new DistributedAtomicLong(curatorFramework, path, new ExponentialBackoffRetry(baseSleepTime, maxRetries)); } /** For internal use; prefer creating a {@link com.yahoo.vespa.curator.recipes.CuratorLock} */ public InterProcessLock createMutex(String lockPath) { return new InterProcessMutex(curatorFramework, lockPath); } private void addFakeListener() { curatorFramework.getConnectionStateListenable().addListener(new ConnectionStateListener() { @Override public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) { } }); } public CompletionWaiter getCompletionWaiter(Path waiterPath, int numMembers, String id) { return CuratorCompletionWaiter.create(curatorFramework, waiterPath, numMembers, id); } public CompletionWaiter createCompletionWaiter(Path parentPath, String waiterNode, int numMembers, String id) { return CuratorCompletionWaiter.createAndInitialize(this, parentPath, waiterNode, numMembers, id); } /** Creates a listenable cache which keeps in sync with changes to all the immediate children of a path */ public DirectoryCache createDirectoryCache(String path, boolean cacheData, boolean dataIsCompressed, ExecutorService executorService) { return new PathChildrenCacheWrapper(framework(), path, cacheData, dataIsCompressed, executorService); } /** Creates a listenable cache which keeps in sync with changes to a given node */ public FileCache createFileCache(String path, boolean dataIsCompressed) { return new NodeCacheWrapper(framework(), path, dataIsCompressed); } /** A convenience method which returns whether the given path exists */ public boolean exists(Path path) { try { return framework().checkExists().forPath(path.getAbsolute()) != null; } catch (Exception e) { throw new RuntimeException("Could not 
check existence of " + path.getAbsolute(), e); } } /** * A convenience method which sets some content at a path. * If the path and any of its parents does not exists they are created. */ public void set(Path path, byte[] data) { String absolutePath = path.getAbsolute(); try { if ( ! exists(path)) framework().create().creatingParentsIfNeeded().forPath(absolutePath, data); else framework().setData().forPath(absolutePath, data); } catch (Exception e) { throw new RuntimeException("Could not set data at " + absolutePath, e); } } /** * Creates an empty node at a path, creating any parents as necessary. * If the node already exists nothing is done. */ public void create(Path path) { if (exists(path)) return; String absolutePath = path.getAbsolute(); try { framework().create().creatingParentsIfNeeded().forPath(absolutePath, new byte[0]); } catch (org.apache.zookeeper.KeeperException.NodeExistsException e) { } catch (Exception e) { throw new RuntimeException("Could not create " + absolutePath, e); } } /** * Creates all the given paths in a single transaction. Any paths which already exists are ignored. */ public void createAtomically(Path... paths) { try { CuratorTransaction transaction = framework().inTransaction(); for (Path path : paths) { if ( ! exists(path)) { transaction = transaction.create().forPath(path.getAbsolute(), new byte[0]).and(); } } ((CuratorTransactionFinal)transaction).commit(); } catch (Exception e) { throw new RuntimeException("Could not create " + Arrays.toString(paths), e); } } /** * Deletes the given path and any children it may have. * If the path does not exists nothing is done. */ public void delete(Path path) { if ( ! exists(path)) return; try { framework().delete().guaranteed().deletingChildrenIfNeeded().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not delete " + path.getAbsolute(), e); } } /** * Returns the names of the children at the given path. 
* If the path does not exist or have no children an empty list (never null) is returned. */ public List<String> getChildren(Path path) { if ( ! exists(path)) return Collections.emptyList(); try { return framework().getChildren().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not get children of " + path.getAbsolute(), e); } } /** * Returns the data at the given path, which may be a zero-length buffer if the node exists but have no data. * Empty is returned if the path does not exist. */ public Optional<byte[]> getData(Path path) { if ( ! exists(path)) return Optional.empty(); try { return Optional.of(framework().getData().forPath(path.getAbsolute())); } catch (Exception e) { throw new RuntimeException("Could not get data at " + path.getAbsolute(), e); } } /** Returns the curator framework API */ public CuratorFramework framework() { return curatorFramework; } @Override public void close() { curatorFramework.close(); } /** * Interface for waiting for completion of an operation */ public interface CompletionWaiter { /** * Awaits completion of something. Blocks until an implementation defined * condition has been met. * * @param timeout timeout for blocking await call. * @throws CompletionTimeoutException if timeout is reached without completion. */ void awaitCompletion(Duration timeout); /** * Notify completion of something. This method does not block and is called by clients * that want to notify the completion waiter that something has completed. */ void notifyCompletion(); } /** * A listenable cache of all the immediate children of a curator path. * This wraps the Curator PathChildrenCache recipe to allow us to mock it. */ public interface DirectoryCache { void start(); void addListener(PathChildrenCacheListener listener); List<ChildData> getCurrentData(); void close(); } /** * A listenable cache of the content of a single curator path. * This wraps the Curator NodeCache recipe to allow us to mock it. 
*/ public interface FileCache { void start(); void addListener(NodeCacheListener listener); ChildData getCurrentData(); void close(); } }
Use a list longer than 2 to ensure the test verifies the rotation order, not merely that the list has been reversed (with only two entries, rotation and reversal are indistinguishable).
/**
 * Verifies that the connection spec lists the server running on this host first.
 * Uses more than two servers so the test cannot pass by simply reversing the list:
 * with only two entries, rotation and reversal produce the same result.
 */
public void require_that_server_order_is_correct() {
    ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder();
    builder.zookeeperserver(createZKBuilder("localhost", port1));
    builder.zookeeperserver(createZKBuilder(HostName.getLocalhost(), port2));
    builder.zookeeperserver(createZKBuilder("localhost", 1234));
    builder.zookeeperserver(createZKBuilder("localhost", 6789));
    try (Curator curator = createCurator(new ConfigserverConfig(builder))) {
        assertThat(curator.serverCount(), is(4));
        // The local server must come first, followed by the others in their
        // original order, wrapped around.
        assertEquals(HostName.getLocalhost() + ":" + port2 + ",localhost:1234,localhost:6789,localhost:" + port1,
                     curator.connectionSpec());
    }
}
builder.zookeeperserver(createZKBuilder(HostName.getLocalhost(), port2));
/** Checks that connectionSpec() puts the server hosted on this machine at the front of the list. */
public void require_that_server_order_is_correct() {
    ConfigserverConfig.Builder configBuilder = new ConfigserverConfig.Builder();
    configBuilder.zookeeperserver(createZKBuilder("localhost", port1));
    configBuilder.zookeeperserver(createZKBuilder(HostName.getLocalhost(), port2));
    configBuilder.zookeeperserver(createZKBuilder("localhost", 1234));
    configBuilder.zookeeperserver(createZKBuilder("localhost", 6789));
    try (Curator curator = createCurator(new ConfigserverConfig(configBuilder))) {
        assertThat(curator.serverCount(), is(4));
        // The local server must lead, followed by the next server in wrap-around order.
        String expectedPrefix = HostName.getLocalhost() + ":" + port2 + ",localhost:1234";
        assertTrue(curator.connectionSpec().startsWith(expectedPrefix));
    }
}
class CuratorTest { private String spec1; private String spec2; private TestingServer test1; private TestingServer test2; private int port1; private int port2; @Before public void setupServers() throws Exception { port1 = allocatePort(); port2 = allocatePort(); test1 = new TestingServer(port1); test2 = new TestingServer(port2); spec1 = "localhost:" + port1; spec2 = "localhost:" + port2; } private int allocatePort() { return PortAllocator.findAvailablePort(); } @After public void teardownServers() throws IOException { test1.stop(); test1.close(); test2.close(); test2.stop(); } @Test public void require_curator_is_created_from_config() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); } } @Test public void require_that_curator_can_produce_spec() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); assertThat(curator.serverCount(), is(2)); } } @Test public void require_that_server_count_is_correct() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); try (Curator curator = createCurator(new ConfigserverConfig(builder))) { assertThat(curator.serverCount(), is(1)); } } @Test private ConfigserverConfig createTestConfig() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); builder.zookeeperserver(createZKBuilder("localhost", port2)); return new ConfigserverConfig(builder); } private ConfigserverConfig.Zookeeperserver.Builder createZKBuilder(String hostname, int port) { ConfigserverConfig.Zookeeperserver.Builder zkBuilder = new ConfigserverConfig.Zookeeperserver.Builder(); zkBuilder.hostname(hostname); zkBuilder.port(port); return zkBuilder; } private Curator createCurator(ConfigserverConfig configserverConfig) { return new Curator(configserverConfig, null); } 
private static class PortAllocator { private static class PortRange { private int first = 18621; private int last = 18630; private int value = first; synchronized int next() { if (value > last) { throw new RuntimeException("no port ports in range"); } return value++; } } private final static PortRange portRange = new PortRange(); public static int findAvailablePort() { return portRange.next(); } } }
class CuratorTest { private String spec1; private String spec2; private TestingServer test1; private TestingServer test2; private int port1; private int port2; @Before public void setupServers() throws Exception { port1 = allocatePort(); port2 = allocatePort(); test1 = new TestingServer(port1); test2 = new TestingServer(port2); spec1 = "localhost:" + port1; spec2 = "localhost:" + port2; } private int allocatePort() { return PortAllocator.findAvailablePort(); } @After public void teardownServers() throws IOException { test1.stop(); test1.close(); test2.close(); test2.stop(); } @Test public void require_curator_is_created_from_config() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); } } @Test public void require_that_curator_can_produce_spec() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); assertThat(curator.serverCount(), is(2)); } } @Test public void require_that_server_count_is_correct() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); try (Curator curator = createCurator(new ConfigserverConfig(builder))) { assertThat(curator.serverCount(), is(1)); } } @Test private ConfigserverConfig createTestConfig() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); builder.zookeeperserver(createZKBuilder("localhost", port2)); return new ConfigserverConfig(builder); } private ConfigserverConfig.Zookeeperserver.Builder createZKBuilder(String hostname, int port) { ConfigserverConfig.Zookeeperserver.Builder zkBuilder = new ConfigserverConfig.Zookeeperserver.Builder(); zkBuilder.hostname(hostname); zkBuilder.port(port); return zkBuilder; } private Curator createCurator(ConfigserverConfig configserverConfig) { return new Curator(configserverConfig, null); } 
private static class PortAllocator { private static class PortRange { private int first = 18621; private int last = 18630; private int value = first; synchronized int next() { if (value > last) { throw new RuntimeException("no port ports in range"); } return value++; } } private final static PortRange portRange = new PortRange(); public static int findAvailablePort() { return portRange.next(); } } }
fixed
public void require_that_server_order_is_correct() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); builder.zookeeperserver(createZKBuilder(HostName.getLocalhost(), port2)); try (Curator curator = createCurator(new ConfigserverConfig(builder))) { assertThat(curator.serverCount(), is(2)); assertEquals(HostName.getLocalhost() + ":" + port2 + ",localhost:" + port1, curator.connectionSpec()); } }
builder.zookeeperserver(createZKBuilder(HostName.getLocalhost(), port2));
public void require_that_server_order_is_correct() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); builder.zookeeperserver(createZKBuilder(HostName.getLocalhost(), port2)); builder.zookeeperserver(createZKBuilder("localhost", 1234)); builder.zookeeperserver(createZKBuilder("localhost", 6789)); try (Curator curator = createCurator(new ConfigserverConfig(builder))) { assertThat(curator.serverCount(), is(4)); assertTrue(curator.connectionSpec().startsWith(HostName.getLocalhost() + ":" + port2 + ",localhost:1234")); } }
class CuratorTest { private String spec1; private String spec2; private TestingServer test1; private TestingServer test2; private int port1; private int port2; @Before public void setupServers() throws Exception { port1 = allocatePort(); port2 = allocatePort(); test1 = new TestingServer(port1); test2 = new TestingServer(port2); spec1 = "localhost:" + port1; spec2 = "localhost:" + port2; } private int allocatePort() { return PortAllocator.findAvailablePort(); } @After public void teardownServers() throws IOException { test1.stop(); test1.close(); test2.close(); test2.stop(); } @Test public void require_curator_is_created_from_config() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); } } @Test public void require_that_curator_can_produce_spec() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); assertThat(curator.serverCount(), is(2)); } } @Test public void require_that_server_count_is_correct() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); try (Curator curator = createCurator(new ConfigserverConfig(builder))) { assertThat(curator.serverCount(), is(1)); } } @Test private ConfigserverConfig createTestConfig() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); builder.zookeeperserver(createZKBuilder("localhost", port2)); return new ConfigserverConfig(builder); } private ConfigserverConfig.Zookeeperserver.Builder createZKBuilder(String hostname, int port) { ConfigserverConfig.Zookeeperserver.Builder zkBuilder = new ConfigserverConfig.Zookeeperserver.Builder(); zkBuilder.hostname(hostname); zkBuilder.port(port); return zkBuilder; } private Curator createCurator(ConfigserverConfig configserverConfig) { return new Curator(configserverConfig, null); } 
private static class PortAllocator { private static class PortRange { private int first = 18621; private int last = 18630; private int value = first; synchronized int next() { if (value > last) { throw new RuntimeException("no port ports in range"); } return value++; } } private final static PortRange portRange = new PortRange(); public static int findAvailablePort() { return portRange.next(); } } }
class CuratorTest { private String spec1; private String spec2; private TestingServer test1; private TestingServer test2; private int port1; private int port2; @Before public void setupServers() throws Exception { port1 = allocatePort(); port2 = allocatePort(); test1 = new TestingServer(port1); test2 = new TestingServer(port2); spec1 = "localhost:" + port1; spec2 = "localhost:" + port2; } private int allocatePort() { return PortAllocator.findAvailablePort(); } @After public void teardownServers() throws IOException { test1.stop(); test1.close(); test2.close(); test2.stop(); } @Test public void require_curator_is_created_from_config() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); } } @Test public void require_that_curator_can_produce_spec() { try (Curator curator = createCurator(createTestConfig())) { assertThat(curator.connectionSpec(), is(spec1 + "," + spec2)); assertThat(curator.serverCount(), is(2)); } } @Test public void require_that_server_count_is_correct() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); try (Curator curator = createCurator(new ConfigserverConfig(builder))) { assertThat(curator.serverCount(), is(1)); } } @Test private ConfigserverConfig createTestConfig() { ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); builder.zookeeperserver(createZKBuilder("localhost", port1)); builder.zookeeperserver(createZKBuilder("localhost", port2)); return new ConfigserverConfig(builder); } private ConfigserverConfig.Zookeeperserver.Builder createZKBuilder(String hostname, int port) { ConfigserverConfig.Zookeeperserver.Builder zkBuilder = new ConfigserverConfig.Zookeeperserver.Builder(); zkBuilder.hostname(hostname); zkBuilder.port(port); return zkBuilder; } private Curator createCurator(ConfigserverConfig configserverConfig) { return new Curator(configserverConfig, null); } 
private static class PortAllocator { private static class PortRange { private int first = 18621; private int last = 18630; private int value = first; synchronized int next() { if (value > last) { throw new RuntimeException("no port ports in range"); } return value++; } } private final static PortRange portRange = new PortRange(); public static int findAvailablePort() { return portRange.next(); } } }
Should maybe compare file contents? I would expect `WriteFileTask` to overwrite an old file should the contents differ.
public boolean execute(TaskContext context) { final FileSystemPath path = context.getFileSystem().withPath(params.path); if (path.isRegularFile()) { return false; } context.executeSubtask(new MakeDirectoryTask(params.path.getParent()).withParents()); path.writeUtf8File(params.contentProducer.call()) .setPermissions("rw-r--r--") .setOwner("root") .setGroup("root"); return true; }
if (path.isRegularFile()) {
public boolean execute(TaskContext context) { final FileSystemPath fileSystemPath = context.getFileSystem().withPath(path); if (fileSystemPath.isRegularFile()) { return false; } context.executeSubtask(new MakeDirectoryTask(path.getParent()).withParents()); String content = contentProducer.call(); fileSystemPath.writeUtf8File(content); permissions.ifPresent(fileSystemPath::setPermissions); owner.ifPresent(fileSystemPath::setOwner); group.ifPresent(fileSystemPath::setGroup); return true; }
class Params { private final Path path; private final Producer<String> contentProducer; private Optional<String> user = Optional.empty(); private Optional<String> group = Optional.empty(); private Optional<String> permissions = Optional.empty(); public Params(Path path, Producer<String> contentProducer) { this.path = path; this.contentProducer = contentProducer; } public Params withUser(String user) { this.user = Optional.of(user); return this; } public Params withGroup(String group) { this.group = Optional.of(group); return this; } /** * @param permissions of the form "rwxr-x---". */ public Params withPermissions(String permissions) { this.permissions = Optional.of(permissions); return this; } }
class WriteFileTask implements Task { private final Path path; private final Producer<String> contentProducer; private Optional<String> owner = Optional.empty(); private Optional<String> group = Optional.empty(); private Optional<String> permissions = Optional.empty(); public WriteFileTask(Path path, Producer<String> contentProducer) { this.path = path; this.contentProducer = contentProducer; } public WriteFileTask withOwner(String owner) { this.owner = Optional.of(owner); return this; } public WriteFileTask withGroup(String group) { this.group = Optional.of(group); return this; } /** * @param permissions of the form "rwxr-x---". */ public WriteFileTask withPermissions(String permissions) { this.permissions = Optional.of(permissions); return this; } @Override public Path getPath() { return path; } public Producer<String> getContentProducer() { return contentProducer; } public Optional<String> getOwner() { return owner; } public Optional<String> getGroup() { return group; } public Optional<String> getPermissions() { return permissions; } }
Let me add a TODO for that. Some file content may be expensive to compute compared to a simpler check like existence (or timestamp), so perhaps this needs to be controlled by parameters.
public boolean execute(TaskContext context) { final FileSystemPath path = context.getFileSystem().withPath(params.path); if (path.isRegularFile()) { return false; } context.executeSubtask(new MakeDirectoryTask(params.path.getParent()).withParents()); path.writeUtf8File(params.contentProducer.call()) .setPermissions("rw-r--r--") .setOwner("root") .setGroup("root"); return true; }
if (path.isRegularFile()) {
public boolean execute(TaskContext context) { final FileSystemPath fileSystemPath = context.getFileSystem().withPath(path); if (fileSystemPath.isRegularFile()) { return false; } context.executeSubtask(new MakeDirectoryTask(path.getParent()).withParents()); String content = contentProducer.call(); fileSystemPath.writeUtf8File(content); permissions.ifPresent(fileSystemPath::setPermissions); owner.ifPresent(fileSystemPath::setOwner); group.ifPresent(fileSystemPath::setGroup); return true; }
class Params { private final Path path; private final Producer<String> contentProducer; private Optional<String> user = Optional.empty(); private Optional<String> group = Optional.empty(); private Optional<String> permissions = Optional.empty(); public Params(Path path, Producer<String> contentProducer) { this.path = path; this.contentProducer = contentProducer; } public Params withUser(String user) { this.user = Optional.of(user); return this; } public Params withGroup(String group) { this.group = Optional.of(group); return this; } /** * @param permissions of the form "rwxr-x---". */ public Params withPermissions(String permissions) { this.permissions = Optional.of(permissions); return this; } }
class WriteFileTask implements Task { private final Path path; private final Producer<String> contentProducer; private Optional<String> owner = Optional.empty(); private Optional<String> group = Optional.empty(); private Optional<String> permissions = Optional.empty(); public WriteFileTask(Path path, Producer<String> contentProducer) { this.path = path; this.contentProducer = contentProducer; } public WriteFileTask withOwner(String owner) { this.owner = Optional.of(owner); return this; } public WriteFileTask withGroup(String group) { this.group = Optional.of(group); return this; } /** * @param permissions of the form "rwxr-x---". */ public WriteFileTask withPermissions(String permissions) { this.permissions = Optional.of(permissions); return this; } @Override public Path getPath() { return path; } public Producer<String> getContentProducer() { return contentProducer; } public Optional<String> getOwner() { return owner; } public Optional<String> getGroup() { return group; } public Optional<String> getPermissions() { return permissions; } }
Actually there is a TODO below, but let me move it up here and rephrase.
public boolean execute(TaskContext context) { final FileSystemPath path = context.getFileSystem().withPath(params.path); if (path.isRegularFile()) { return false; } context.executeSubtask(new MakeDirectoryTask(params.path.getParent()).withParents()); path.writeUtf8File(params.contentProducer.call()) .setPermissions("rw-r--r--") .setOwner("root") .setGroup("root"); return true; }
if (path.isRegularFile()) {
public boolean execute(TaskContext context) { final FileSystemPath fileSystemPath = context.getFileSystem().withPath(path); if (fileSystemPath.isRegularFile()) { return false; } context.executeSubtask(new MakeDirectoryTask(path.getParent()).withParents()); String content = contentProducer.call(); fileSystemPath.writeUtf8File(content); permissions.ifPresent(fileSystemPath::setPermissions); owner.ifPresent(fileSystemPath::setOwner); group.ifPresent(fileSystemPath::setGroup); return true; }
class Params { private final Path path; private final Producer<String> contentProducer; private Optional<String> user = Optional.empty(); private Optional<String> group = Optional.empty(); private Optional<String> permissions = Optional.empty(); public Params(Path path, Producer<String> contentProducer) { this.path = path; this.contentProducer = contentProducer; } public Params withUser(String user) { this.user = Optional.of(user); return this; } public Params withGroup(String group) { this.group = Optional.of(group); return this; } /** * @param permissions of the form "rwxr-x---". */ public Params withPermissions(String permissions) { this.permissions = Optional.of(permissions); return this; } }
class WriteFileTask implements Task { private final Path path; private final Producer<String> contentProducer; private Optional<String> owner = Optional.empty(); private Optional<String> group = Optional.empty(); private Optional<String> permissions = Optional.empty(); public WriteFileTask(Path path, Producer<String> contentProducer) { this.path = path; this.contentProducer = contentProducer; } public WriteFileTask withOwner(String owner) { this.owner = Optional.of(owner); return this; } public WriteFileTask withGroup(String group) { this.group = Optional.of(group); return this; } /** * @param permissions of the form "rwxr-x---". */ public WriteFileTask withPermissions(String permissions) { this.permissions = Optional.of(permissions); return this; } @Override public Path getPath() { return path; } public Producer<String> getContentProducer() { return contentProducer; } public Optional<String> getOwner() { return owner; } public Optional<String> getGroup() { return group; } public Optional<String> getPermissions() { return permissions; } }
Why not just `Producer<String> contentProducer = () -> content;` ?
public void testWrite() { Path path = Paths.get("foo"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<WriteFileTask> writeFileTaskArgumentCaptor = ArgumentCaptor.forClass(WriteFileTask.class); verify(contextMock, times(1)) .executeSubtask(writeFileTaskArgumentCaptor.capture()); }
Producer<String> contentProducer = (Producer<String>) mock(Producer.class);
public void testWrite() { Path parentDirectory = Paths.get("/foo"); Path path = parentDirectory.resolve("bar"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(contentProducer, times(1)).call(); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<MakeDirectoryTask> makeDirectoryTaskCaptor = ArgumentCaptor.forClass(MakeDirectoryTask.class); verify(contextMock, times(1)) .executeSubtask(makeDirectoryTaskCaptor.capture()); MakeDirectoryTask makeDirectoryTask = makeDirectoryTaskCaptor.getValue(); assertEquals(parentDirectory, makeDirectoryTask.getPath()); assertTrue(makeDirectoryTask.getWithParents()); }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
What is this for? I don't see it being used
public void testWrite() { Path path = Paths.get("foo"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<WriteFileTask> writeFileTaskArgumentCaptor = ArgumentCaptor.forClass(WriteFileTask.class); verify(contextMock, times(1)) .executeSubtask(writeFileTaskArgumentCaptor.capture()); }
ArgumentCaptor<WriteFileTask> writeFileTaskArgumentCaptor =
public void testWrite() { Path parentDirectory = Paths.get("/foo"); Path path = parentDirectory.resolve("bar"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(contentProducer, times(1)).call(); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<MakeDirectoryTask> makeDirectoryTaskCaptor = ArgumentCaptor.forClass(MakeDirectoryTask.class); verify(contextMock, times(1)) .executeSubtask(makeDirectoryTaskCaptor.capture()); MakeDirectoryTask makeDirectoryTask = makeDirectoryTaskCaptor.getValue(); assertEquals(parentDirectory, makeDirectoryTask.getPath()); assertTrue(makeDirectoryTask.getWithParents()); }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
I wanted to verify it was being called once: Added
public void testWrite() { Path path = Paths.get("foo"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<WriteFileTask> writeFileTaskArgumentCaptor = ArgumentCaptor.forClass(WriteFileTask.class); verify(contextMock, times(1)) .executeSubtask(writeFileTaskArgumentCaptor.capture()); }
Producer<String> contentProducer = (Producer<String>) mock(Producer.class);
public void testWrite() { Path parentDirectory = Paths.get("/foo"); Path path = parentDirectory.resolve("bar"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(contentProducer, times(1)).call(); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<MakeDirectoryTask> makeDirectoryTaskCaptor = ArgumentCaptor.forClass(MakeDirectoryTask.class); verify(contextMock, times(1)) .executeSubtask(makeDirectoryTaskCaptor.capture()); MakeDirectoryTask makeDirectoryTask = makeDirectoryTaskCaptor.getValue(); assertEquals(parentDirectory, makeDirectoryTask.getPath()); assertTrue(makeDirectoryTask.getWithParents()); }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
Added verification of directory creation subtask.
public void testWrite() { Path path = Paths.get("foo"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<WriteFileTask> writeFileTaskArgumentCaptor = ArgumentCaptor.forClass(WriteFileTask.class); verify(contextMock, times(1)) .executeSubtask(writeFileTaskArgumentCaptor.capture()); }
ArgumentCaptor<WriteFileTask> writeFileTaskArgumentCaptor =
public void testWrite() { Path parentDirectory = Paths.get("/foo"); Path path = parentDirectory.resolve("bar"); @SuppressWarnings("unchecked") Producer<String> contentProducer = (Producer<String>) mock(Producer.class); when(contentProducer.call()).thenReturn(content); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, contentProducer) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(false); when(contextMock.executeSubtask(any(MakeDirectoryTask.class))).thenReturn(false); assertTrue(task.execute(contextMock)); verify(contentProducer, times(1)).call(); verify(fileSystemMock).writeUtf8File(path, content); verify(fileSystemMock).setPermissions(path, permissions); verify(fileSystemMock).setOwner(path, owner); verify(fileSystemMock).setGroup(path, group); ArgumentCaptor<MakeDirectoryTask> makeDirectoryTaskCaptor = ArgumentCaptor.forClass(MakeDirectoryTask.class); verify(contextMock, times(1)) .executeSubtask(makeDirectoryTaskCaptor.capture()); MakeDirectoryTask makeDirectoryTask = makeDirectoryTaskCaptor.getValue(); assertEquals(parentDirectory, makeDirectoryTask.getPath()); assertTrue(makeDirectoryTask.getWithParents()); }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
class WriteFileTaskTest extends TaskTestBase { private final String content = "line1\nline2\n"; @Test @Test public void fileAlreadyExists() { Path path = Paths.get("foo"); final String permissions = "rwxr-x---"; final String owner = "owner"; final String group = "group"; WriteFileTask task = new WriteFileTask(path, () -> content) .withPermissions(permissions) .withOwner(owner) .withGroup(group); when(fileSystemMock.isRegularFile(path)).thenReturn(true); assertFalse(task.execute(contextMock)); } }
Isn't the doc in disagreement with this return? When an upgrade is cancelled, it wasn't _successful on the ... change_, but it does clear the `deploying()`.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! application.deploying().isPresent()) return true;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Is it possible to have production jobs with no zone? Let me answer that: Post verification jobs, for instance, *are* not a deployment, even though they live in a zone. Ok. And, of course, when these exist, which isn't yet.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! zone.isPresent()) continue;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
// NOTE(review): pre-change copy of DeploymentTrigger. It orchestrates CI job triggering for
// applications: reacting to completed jobs, periodically triggering ready jobs, and
// starting/cancelling changes (version upgrades or application-revision deployments).
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
// (cont.) triggerFromCompletion: a component success may start an ApplicationChange (unless a
// version change is deploying); a completed deployment clears the change; failures may retry.
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
// (cont.) triggerReadyJobs(application): re-triggers system test for upgrades, then walks the job
// order and triggers successors that have changes available or hanging next jobs.
// NOTE(review): componentStatus may be null here and changesAvailable dereferences `previous` —
// the revised copy of this class adds a null guard.
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
// (cont.) changesAvailable: the version branch gates on staging success and production-version
// state; the revision branch compares the last-success revisions of previous and next.
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
// (cont.) triggerChange/cancelChange, plus retry policies: immediate retry if the job started
// failing within the last 10 seconds, or an out-of-capacity failure within the last 15 minutes.
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
// (cont.) hasJob consults the deployment spec; the trigger(...) overloads skip production jobs
// while another production job is running, and funnel into triggerAllowParallel.
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
// (cont.) triggerAllowParallel logs, enqueues the job in the build system and records the
// triggering on the application; allowedTriggering applies block windows, already-upgraded,
// already-running, deployment-spec and project-id checks.
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
// NOTE(review): revised copy of DeploymentTrigger. Differs from the copy above in:
// changesAvailable is restructured per next-job type (with a componentStatus null guard),
// and isOnAtLeastProductionVersion is replaced by alreadyDeployed, which also counts a
// last job success on the target version.
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
// (cont.) triggerFromCompletion: a component success may start an ApplicationChange (unless a
// version change is deploying); a completed deployment clears the change; failures may retry.
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
// (cont.) triggerReadyJobs(application): re-triggers system test for upgrades, then walks the job
// order; note the componentStatus null guard before calling changesAvailable.
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
// (cont.) changesAvailable, restructured: a test-type next compares last-success versions;
// a production-type next requires staging success and consults alreadyDeployed; any other
// type is rejected loudly with IllegalStateException.
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
// (cont.) triggerChange/cancelChange, plus retry policies: immediate retry if the job started
// failing within the last 10 seconds, or an out-of-capacity failure within the last 15 minutes.
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
// (cont.) hasJob consults the deployment spec; the trigger(...) overloads skip production jobs
// while another production job is running, and funnel into triggerAllowParallel.
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
// (cont.) triggerAllowParallel logs, enqueues the job in the build system and records the
// triggering; allowedTriggering now uses alreadyDeployed for the skip-already-upgraded check.
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }