comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Ok, test input should be simplified as well then, second `<parallel>` tag can be removed.
public void productionSpecWithDuplicateRegions() { StringReader r = new StringReader( "<deployment>\n" + " <prod>\n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " <parallel>\n" + " <region active='true'>eu-west-1</region>\n" + " <region active='true'>us-central-1</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); try { DeploymentSpec.fromXml(r); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); } }
assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage());
public void productionSpecWithDuplicateRegions() { StringReader r = new StringReader( "<deployment>\n" + " <prod>\n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); try { DeploymentSpec.fromXml(r); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); } }
class DeploymentSpecTest { @Test public void testSpec() { String specXml = "<deployment version='1.0'>" + " <test/>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertFalse(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void stagingSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(2, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void minimalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(4, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); 
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy()); } @Test public void maximalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test/>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(5, spec.steps().size()); assertEquals(4, spec.zones().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay); assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds()); assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); 
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); } @Test public void productionSpecWithGlobalServiceId() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod global-service-id='query'>" + " <region active='true'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(spec.globalServiceId(), Optional.of("query")); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInStaging() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test public void productionSpecWithGlobalServiceIdBeforeStaging() { StringReader r = new StringReader( "<deployment>" + " <test/>" + " <prod global-service-id='qrs'>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("qrs", spec.globalServiceId().get()); } @Test 
public void productionSpecWithUpgradePolicy() { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.upgradePolicy().toString()); } @Test public void maxDelayExceeded() { try { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <delay hours='23'/>" + " <region active='true'>us-central-1</region>" + " <delay minutes='59' seconds='61'/>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec.fromXml(r); fail("Expected exception due to exceeding the max total delay"); } catch (IllegalArgumentException e) { assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage()); } } @Test public void testEmpty() { assertFalse(DeploymentSpec.empty.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy()); assertTrue(DeploymentSpec.empty.steps().isEmpty()); assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm()); } @Test public void productionSpecWithParallelDeployments() { StringReader r = new StringReader( "<deployment>\n" + " <prod> \n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3)); assertEquals(2, parallelZones.zones().size()); assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get()); 
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get()); } @Test }
class DeploymentSpecTest { @Test public void testSpec() { String specXml = "<deployment version='1.0'>" + " <test/>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertFalse(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void stagingSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(2, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void minimalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(4, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); 
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy()); } @Test public void maximalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test/>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(5, spec.steps().size()); assertEquals(4, spec.zones().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay); assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds()); assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); 
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); } @Test public void productionSpecWithGlobalServiceId() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod global-service-id='query'>" + " <region active='true'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(spec.globalServiceId(), Optional.of("query")); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInStaging() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test public void productionSpecWithGlobalServiceIdBeforeStaging() { StringReader r = new StringReader( "<deployment>" + " <test/>" + " <prod global-service-id='qrs'>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("qrs", spec.globalServiceId().get()); } @Test 
public void productionSpecWithUpgradePolicy() { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.upgradePolicy().toString()); } @Test public void maxDelayExceeded() { try { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <delay hours='23'/>" + " <region active='true'>us-central-1</region>" + " <delay minutes='59' seconds='61'/>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec.fromXml(r); fail("Expected exception due to exceeding the max total delay"); } catch (IllegalArgumentException e) { assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage()); } } @Test public void testEmpty() { assertFalse(DeploymentSpec.empty.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy()); assertTrue(DeploymentSpec.empty.steps().isEmpty()); assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm()); } @Test public void productionSpecWithParallelDeployments() { StringReader r = new StringReader( "<deployment>\n" + " <prod> \n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3)); assertEquals(2, parallelZones.zones().size()); assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get()); 
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get()); } @Test }
ok, done
public void productionSpecWithDuplicateRegions() { StringReader r = new StringReader( "<deployment>\n" + " <prod>\n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " <parallel>\n" + " <region active='true'>eu-west-1</region>\n" + " <region active='true'>us-central-1</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); try { DeploymentSpec.fromXml(r); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); } }
assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage());
public void productionSpecWithDuplicateRegions() { StringReader r = new StringReader( "<deployment>\n" + " <prod>\n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); try { DeploymentSpec.fromXml(r); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); } }
class DeploymentSpecTest { @Test public void testSpec() { String specXml = "<deployment version='1.0'>" + " <test/>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertFalse(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void stagingSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(2, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void minimalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(4, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); 
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy()); } @Test public void maximalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test/>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(5, spec.steps().size()); assertEquals(4, spec.zones().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay); assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds()); assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); 
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); } @Test public void productionSpecWithGlobalServiceId() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod global-service-id='query'>" + " <region active='true'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(spec.globalServiceId(), Optional.of("query")); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInStaging() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test public void productionSpecWithGlobalServiceIdBeforeStaging() { StringReader r = new StringReader( "<deployment>" + " <test/>" + " <prod global-service-id='qrs'>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("qrs", spec.globalServiceId().get()); } @Test 
public void productionSpecWithUpgradePolicy() { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.upgradePolicy().toString()); } @Test public void maxDelayExceeded() { try { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <delay hours='23'/>" + " <region active='true'>us-central-1</region>" + " <delay minutes='59' seconds='61'/>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec.fromXml(r); fail("Expected exception due to exceeding the max total delay"); } catch (IllegalArgumentException e) { assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage()); } } @Test public void testEmpty() { assertFalse(DeploymentSpec.empty.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy()); assertTrue(DeploymentSpec.empty.steps().isEmpty()); assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm()); } @Test public void productionSpecWithParallelDeployments() { StringReader r = new StringReader( "<deployment>\n" + " <prod> \n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3)); assertEquals(2, parallelZones.zones().size()); assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get()); 
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get()); } @Test }
class DeploymentSpecTest { @Test public void testSpec() { String specXml = "<deployment version='1.0'>" + " <test/>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertFalse(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void stagingSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(2, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertFalse(spec.includes(Environment.prod, Optional.empty())); assertFalse(spec.globalServiceId().isPresent()); } @Test public void minimalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(4, spec.steps().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); 
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy()); } @Test public void maximalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test/>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(5, spec.steps().size()); assertEquals(4, spec.zones().size()); assertTrue(spec.steps().get(0).deploysTo(Environment.test)); assertTrue(spec.steps().get(1).deploysTo(Environment.staging)); assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active()); assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay); assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds()); assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1")))); 
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active()); assertTrue(spec.includes(Environment.test, Optional.empty())); assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1")))); assertTrue(spec.includes(Environment.staging, Optional.empty())); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.globalServiceId().isPresent()); } @Test public void productionSpecWithGlobalServiceId() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <prod global-service-id='query'>" + " <region active='true'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(spec.globalServiceId(), Optional.of("query")); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <test global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInStaging() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <staging global-service-id='query' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); } @Test public void productionSpecWithGlobalServiceIdBeforeStaging() { StringReader r = new StringReader( "<deployment>" + " <test/>" + " <prod global-service-id='qrs'>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + " <staging/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("qrs", spec.globalServiceId().get()); } @Test 
public void productionSpecWithUpgradePolicy() { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.upgradePolicy().toString()); } @Test public void maxDelayExceeded() { try { StringReader r = new StringReader( "<deployment>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <delay hours='23'/>" + " <region active='true'>us-central-1</region>" + " <delay minutes='59' seconds='61'/>" + " <region active='true'>us-east-3</region>" + " </prod>" + "</deployment>" ); DeploymentSpec.fromXml(r); fail("Expected exception due to exceeding the max total delay"); } catch (IllegalArgumentException e) { assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage()); } } @Test public void testEmpty() { assertFalse(DeploymentSpec.empty.globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy()); assertTrue(DeploymentSpec.empty.steps().isEmpty()); assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm()); } @Test public void productionSpecWithParallelDeployments() { StringReader r = new StringReader( "<deployment>\n" + " <prod> \n" + " <region active='true'>us-west-1</region>\n" + " <parallel>\n" + " <region active='true'>us-central-1</region>\n" + " <region active='true'>us-east-3</region>\n" + " </parallel>\n" + " </prod>\n" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3)); assertEquals(2, parallelZones.zones().size()); assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get()); 
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get()); } @Test }
I think you should remove this if block. DeploymentSpec.steps() should be authoritative on which steps to perform.
public boolean isLast(JobType job, Application application) { List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); if (deploymentSteps.isEmpty()) { return false; } DeploymentSpec.Step lastStep = deploymentSteps.get(deploymentSteps.size() - 1); return fromJob(job, application).get().equals(lastStep); }
return fromJob(job, application).get().equals(lastStep);
public boolean isLast(JobType job, Application application) { List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); if (deploymentSteps.isEmpty()) { return false; } DeploymentSpec.Step lastStep = deploymentSteps.get(deploymentSteps.size() - 1); return fromJob(job, application).get().equals(lastStep); }
class DeploymentOrder { private static final Logger log = Logger.getLogger(DeploymentOrder.class.getName()); private final Controller controller; private final Clock clock; public DeploymentOrder(Controller controller) { this.controller = controller; this.clock = controller.clock(); } /** Returns a list of jobs to trigger after the given job */ public List<JobType> nextAfter(JobType job, Application application) { if (job == JobType.component) { return Collections.singletonList(JobType.systemTest); } List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); Optional<DeploymentSpec.Step> currentStep = fromJob(job, application); if (!currentStep.isPresent()) { return Collections.emptyList(); } if (isLast(job, application)) { return Collections.emptyList(); } Duration delay = delayAfter(currentStep.get(), application); if (postponeDeployment(delay, job, application)) { log.info(String.format("Delaying next job after %s of %s by %s", job, application, delay)); return Collections.emptyList(); } DeploymentSpec.Step nextStep = deploymentSteps.get(deploymentSteps.indexOf(currentStep.get()) + 1); if (nextStep instanceof DeploymentSpec.DeclaredZone) { return Collections.singletonList(toJob((DeploymentSpec.DeclaredZone) nextStep)); } else if (nextStep instanceof DeploymentSpec.ParallelZones) { return ((DeploymentSpec.ParallelZones) nextStep).zones().stream() .map(this::toJob) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } else { throw new IllegalStateException("Unexpected step type: " + nextStep.getClass()); } } /** Returns whether the given job is first in a deployment */ public boolean isFirst(JobType job) { return job == JobType.component; } /** Returns whether the given job is last in a deployment */ /** Returns jobs for given deployment spec, in the order they are declared */ public List<JobType> jobsFrom(DeploymentSpec deploymentSpec) { if (deploymentSpec.steps().isEmpty()) { return Arrays.asList(JobType.systemTest, 
JobType.stagingTest); } List<JobType> jobs = new ArrayList<>(); for (DeploymentSpec.Step step : deploymentSpec.steps()) { if (step instanceof DeploymentSpec.DeclaredZone) { jobs.add(toJob((DeploymentSpec.DeclaredZone) step)); } else if (step instanceof DeploymentSpec.ParallelZones) { ((DeploymentSpec.ParallelZones) step).zones().forEach(zone -> jobs.add(toJob(zone))); } } return Collections.unmodifiableList(jobs); } /** Resolve deployment step from job */ private Optional<DeploymentSpec.Step> fromJob(JobType job, Application application) { for (DeploymentSpec.Step step : application.deploymentSpec().steps()) { if (step.deploysTo(job.environment(), job.isProduction() ? job.region(controller.system()) : Optional.empty())) { return Optional.of(step); } } return Optional.empty(); } /** Resolve job from deployment step */ private JobType toJob(DeploymentSpec.DeclaredZone zone) { return JobType.from(controller.system(), zone.environment(), zone.region().orElse(null)); } /** Returns whether deployment should be postponed according to delay */ private boolean postponeDeployment(Duration delay, JobType job, Application application) { Optional<Instant> lastSuccess = Optional.ofNullable(application.deploymentJobs().jobStatus().get(job)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::at); return lastSuccess.isPresent() && lastSuccess.get().plus(delay).isAfter(clock.instant()); } /** Find all steps that deploy to one or more zones */ private static List<DeploymentSpec.Step> deploymentSteps(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step instanceof DeploymentSpec.DeclaredZone || step instanceof DeploymentSpec.ParallelZones) .collect(Collectors.toList()); } /** Determines the delay that should pass after the given step */ private static Duration delayAfter(DeploymentSpec.Step step, Application application) { int stepIndex = application.deploymentSpec().steps().indexOf(step); if (stepIndex == -1 || stepIndex == 
application.deploymentSpec().steps().size() - 1) { return Duration.ZERO; } Duration totalDelay = Duration.ZERO; List<DeploymentSpec.Step> remainingSteps = application.deploymentSpec().steps() .subList(stepIndex + 1, application.deploymentSpec().steps().size()); for (DeploymentSpec.Step s : remainingSteps) { if (!(s instanceof DeploymentSpec.Delay)) { break; } totalDelay = totalDelay.plus(((DeploymentSpec.Delay) s).duration()); } return totalDelay; } }
class DeploymentOrder { private static final Logger log = Logger.getLogger(DeploymentOrder.class.getName()); private final Controller controller; private final Clock clock; public DeploymentOrder(Controller controller) { Objects.requireNonNull(controller, "controller cannot be null"); this.controller = controller; this.clock = controller.clock(); } /** Returns a list of jobs to trigger after the given job */ public List<JobType> nextAfter(JobType job, Application application) { if (job == JobType.component) { return Collections.singletonList(JobType.systemTest); } List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); Optional<DeploymentSpec.Step> currentStep = fromJob(job, application); if ( ! currentStep.isPresent()) { return Collections.emptyList(); } int currentIndex = deploymentSteps.indexOf(currentStep.get()); if (currentIndex == deploymentSteps.size() - 1) { return Collections.emptyList(); } if (!completedSuccessfully(currentStep.get(), application)) { return Collections.emptyList(); } Duration delay = delayAfter(currentStep.get(), application); if (postponeDeployment(delay, job, application)) { log.info(String.format("Delaying next job after %s of %s by %s", job, application, delay)); return Collections.emptyList(); } DeploymentSpec.Step nextStep = deploymentSteps.get(currentIndex + 1); return nextStep.zones().stream() .map(this::toJob) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } /** Returns whether the given job is first in a deployment */ public boolean isFirst(JobType job) { return job == JobType.component; } /** Returns whether the given job is last in a deployment */ /** Returns jobs for given deployment spec, in the order they are declared */ public List<JobType> jobsFrom(DeploymentSpec deploymentSpec) { return deploymentSpec.steps().stream() .flatMap(step -> jobsFrom(step).stream()) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } /** Returns jobs 
for the given step */ private List<JobType> jobsFrom(DeploymentSpec.Step step) { return step.zones().stream() .map(this::toJob) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } /** Returns whether all jobs have completed successfully for given step */ private boolean completedSuccessfully(DeploymentSpec.Step step, Application application) { return jobsFrom(step).stream() .allMatch(job -> application.deploymentJobs().isSuccessful(application.deploying().get(), job)); } /** Resolve deployment step from job */ private Optional<DeploymentSpec.Step> fromJob(JobType job, Application application) { for (DeploymentSpec.Step step : application.deploymentSpec().steps()) { if (step.deploysTo(job.environment(), job.isProduction() ? job.region(controller.system()) : Optional.empty())) { return Optional.of(step); } } return Optional.empty(); } /** Resolve job from deployment step */ private JobType toJob(DeploymentSpec.DeclaredZone zone) { return JobType.from(controller.system(), zone.environment(), zone.region().orElse(null)); } /** Returns whether deployment should be postponed according to delay */ private boolean postponeDeployment(Duration delay, JobType job, Application application) { Optional<Instant> lastSuccess = Optional.ofNullable(application.deploymentJobs().jobStatus().get(job)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::at); return lastSuccess.isPresent() && lastSuccess.get().plus(delay).isAfter(clock.instant()); } /** Find all steps that deploy to one or more zones */ private static List<DeploymentSpec.Step> deploymentSteps(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step instanceof DeploymentSpec.DeclaredZone || step instanceof DeploymentSpec.ParallelZones) .collect(Collectors.toList()); } /** Determines the delay that should pass after the given step */ private static Duration delayAfter(DeploymentSpec.Step step, Application application) { int stepIndex 
= application.deploymentSpec().steps().indexOf(step); if (stepIndex == -1 || stepIndex == application.deploymentSpec().steps().size() - 1) { return Duration.ZERO; } Duration totalDelay = Duration.ZERO; List<DeploymentSpec.Step> remainingSteps = application.deploymentSpec().steps() .subList(stepIndex + 1, application.deploymentSpec().steps().size()); for (DeploymentSpec.Step s : remainingSteps) { if (!(s instanceof DeploymentSpec.Delay)) { break; } totalDelay = totalDelay.plus(((DeploymentSpec.Delay) s).duration()); } return totalDelay; } }
Yes, it stems from the fact that a test breaks encapsulation by calling into DeploymentTrigger directly. I'll fix the test and remove the empty check.
public boolean isLast(JobType job, Application application) { List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); if (deploymentSteps.isEmpty()) { return false; } DeploymentSpec.Step lastStep = deploymentSteps.get(deploymentSteps.size() - 1); return fromJob(job, application).get().equals(lastStep); }
return fromJob(job, application).get().equals(lastStep);
public boolean isLast(JobType job, Application application) { List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); if (deploymentSteps.isEmpty()) { return false; } DeploymentSpec.Step lastStep = deploymentSteps.get(deploymentSteps.size() - 1); return fromJob(job, application).get().equals(lastStep); }
class DeploymentOrder { private static final Logger log = Logger.getLogger(DeploymentOrder.class.getName()); private final Controller controller; private final Clock clock; public DeploymentOrder(Controller controller) { this.controller = controller; this.clock = controller.clock(); } /** Returns a list of jobs to trigger after the given job */ public List<JobType> nextAfter(JobType job, Application application) { if (job == JobType.component) { return Collections.singletonList(JobType.systemTest); } List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); Optional<DeploymentSpec.Step> currentStep = fromJob(job, application); if (!currentStep.isPresent()) { return Collections.emptyList(); } if (isLast(job, application)) { return Collections.emptyList(); } Duration delay = delayAfter(currentStep.get(), application); if (postponeDeployment(delay, job, application)) { log.info(String.format("Delaying next job after %s of %s by %s", job, application, delay)); return Collections.emptyList(); } DeploymentSpec.Step nextStep = deploymentSteps.get(deploymentSteps.indexOf(currentStep.get()) + 1); if (nextStep instanceof DeploymentSpec.DeclaredZone) { return Collections.singletonList(toJob((DeploymentSpec.DeclaredZone) nextStep)); } else if (nextStep instanceof DeploymentSpec.ParallelZones) { return ((DeploymentSpec.ParallelZones) nextStep).zones().stream() .map(this::toJob) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } else { throw new IllegalStateException("Unexpected step type: " + nextStep.getClass()); } } /** Returns whether the given job is first in a deployment */ public boolean isFirst(JobType job) { return job == JobType.component; } /** Returns whether the given job is last in a deployment */ /** Returns jobs for given deployment spec, in the order they are declared */ public List<JobType> jobsFrom(DeploymentSpec deploymentSpec) { if (deploymentSpec.steps().isEmpty()) { return Arrays.asList(JobType.systemTest, 
JobType.stagingTest); } List<JobType> jobs = new ArrayList<>(); for (DeploymentSpec.Step step : deploymentSpec.steps()) { if (step instanceof DeploymentSpec.DeclaredZone) { jobs.add(toJob((DeploymentSpec.DeclaredZone) step)); } else if (step instanceof DeploymentSpec.ParallelZones) { ((DeploymentSpec.ParallelZones) step).zones().forEach(zone -> jobs.add(toJob(zone))); } } return Collections.unmodifiableList(jobs); } /** Resolve deployment step from job */ private Optional<DeploymentSpec.Step> fromJob(JobType job, Application application) { for (DeploymentSpec.Step step : application.deploymentSpec().steps()) { if (step.deploysTo(job.environment(), job.isProduction() ? job.region(controller.system()) : Optional.empty())) { return Optional.of(step); } } return Optional.empty(); } /** Resolve job from deployment step */ private JobType toJob(DeploymentSpec.DeclaredZone zone) { return JobType.from(controller.system(), zone.environment(), zone.region().orElse(null)); } /** Returns whether deployment should be postponed according to delay */ private boolean postponeDeployment(Duration delay, JobType job, Application application) { Optional<Instant> lastSuccess = Optional.ofNullable(application.deploymentJobs().jobStatus().get(job)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::at); return lastSuccess.isPresent() && lastSuccess.get().plus(delay).isAfter(clock.instant()); } /** Find all steps that deploy to one or more zones */ private static List<DeploymentSpec.Step> deploymentSteps(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step instanceof DeploymentSpec.DeclaredZone || step instanceof DeploymentSpec.ParallelZones) .collect(Collectors.toList()); } /** Determines the delay that should pass after the given step */ private static Duration delayAfter(DeploymentSpec.Step step, Application application) { int stepIndex = application.deploymentSpec().steps().indexOf(step); if (stepIndex == -1 || stepIndex == 
application.deploymentSpec().steps().size() - 1) { return Duration.ZERO; } Duration totalDelay = Duration.ZERO; List<DeploymentSpec.Step> remainingSteps = application.deploymentSpec().steps() .subList(stepIndex + 1, application.deploymentSpec().steps().size()); for (DeploymentSpec.Step s : remainingSteps) { if (!(s instanceof DeploymentSpec.Delay)) { break; } totalDelay = totalDelay.plus(((DeploymentSpec.Delay) s).duration()); } return totalDelay; } }
class DeploymentOrder { private static final Logger log = Logger.getLogger(DeploymentOrder.class.getName()); private final Controller controller; private final Clock clock; public DeploymentOrder(Controller controller) { Objects.requireNonNull(controller, "controller cannot be null"); this.controller = controller; this.clock = controller.clock(); } /** Returns a list of jobs to trigger after the given job */ public List<JobType> nextAfter(JobType job, Application application) { if (job == JobType.component) { return Collections.singletonList(JobType.systemTest); } List<DeploymentSpec.Step> deploymentSteps = deploymentSteps(application); Optional<DeploymentSpec.Step> currentStep = fromJob(job, application); if ( ! currentStep.isPresent()) { return Collections.emptyList(); } int currentIndex = deploymentSteps.indexOf(currentStep.get()); if (currentIndex == deploymentSteps.size() - 1) { return Collections.emptyList(); } if (!completedSuccessfully(currentStep.get(), application)) { return Collections.emptyList(); } Duration delay = delayAfter(currentStep.get(), application); if (postponeDeployment(delay, job, application)) { log.info(String.format("Delaying next job after %s of %s by %s", job, application, delay)); return Collections.emptyList(); } DeploymentSpec.Step nextStep = deploymentSteps.get(currentIndex + 1); return nextStep.zones().stream() .map(this::toJob) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } /** Returns whether the given job is first in a deployment */ public boolean isFirst(JobType job) { return job == JobType.component; } /** Returns whether the given job is last in a deployment */ /** Returns jobs for given deployment spec, in the order they are declared */ public List<JobType> jobsFrom(DeploymentSpec deploymentSpec) { return deploymentSpec.steps().stream() .flatMap(step -> jobsFrom(step).stream()) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } /** Returns jobs 
for the given step */ private List<JobType> jobsFrom(DeploymentSpec.Step step) { return step.zones().stream() .map(this::toJob) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } /** Returns whether all jobs have completed successfully for given step */ private boolean completedSuccessfully(DeploymentSpec.Step step, Application application) { return jobsFrom(step).stream() .allMatch(job -> application.deploymentJobs().isSuccessful(application.deploying().get(), job)); } /** Resolve deployment step from job */ private Optional<DeploymentSpec.Step> fromJob(JobType job, Application application) { for (DeploymentSpec.Step step : application.deploymentSpec().steps()) { if (step.deploysTo(job.environment(), job.isProduction() ? job.region(controller.system()) : Optional.empty())) { return Optional.of(step); } } return Optional.empty(); } /** Resolve job from deployment step */ private JobType toJob(DeploymentSpec.DeclaredZone zone) { return JobType.from(controller.system(), zone.environment(), zone.region().orElse(null)); } /** Returns whether deployment should be postponed according to delay */ private boolean postponeDeployment(Duration delay, JobType job, Application application) { Optional<Instant> lastSuccess = Optional.ofNullable(application.deploymentJobs().jobStatus().get(job)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::at); return lastSuccess.isPresent() && lastSuccess.get().plus(delay).isAfter(clock.instant()); } /** Find all steps that deploy to one or more zones */ private static List<DeploymentSpec.Step> deploymentSteps(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step instanceof DeploymentSpec.DeclaredZone || step instanceof DeploymentSpec.ParallelZones) .collect(Collectors.toList()); } /** Determines the delay that should pass after the given step */ private static Duration delayAfter(DeploymentSpec.Step step, Application application) { int stepIndex 
= application.deploymentSpec().steps().indexOf(step); if (stepIndex == -1 || stepIndex == application.deploymentSpec().steps().size() - 1) { return Duration.ZERO; } Duration totalDelay = Duration.ZERO; List<DeploymentSpec.Step> remainingSteps = application.deploymentSpec().steps() .subList(stepIndex + 1, application.deploymentSpec().steps().size()); for (DeploymentSpec.Step s : remainingSteps) { if (!(s instanceof DeploymentSpec.Delay)) { break; } totalDelay = totalDelay.plus(((DeploymentSpec.Delay) s).duration()); } return totalDelay; } }
Would you care to extend this to verify triggering after the parallel jobs complete?
public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).deploying().isPresent()); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true); assertFalse("Change has been deployed", tester.applications().require(app.id()).deploying().isPresent()); }
}
public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).deploying().isPresent()); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true); assertFalse("Change has been deployed", tester.applications().require(app.id()).deploying().isPresent()); }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1, JobType.productionUsEast3); assertTrue("All jobs consumed", 
tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.buildSystem().takeJobsToRun(); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); 
tester.notifyJobCompletion(JobType.productionUsWest1, application, true); assertTrue("No more jobs triggered at this time", tester.buildSystem().jobs().isEmpty()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.notifyJobCompletion(JobType.productionUsEast3, application, true); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, 
JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
Yes, will fix.
public void parallelDeploymentCompletesOutOfOrder() {
    // Verifies that a change stays "deploying" until every job of a <parallel>
    // step has completed, regardless of the order in which they finish.
    DeploymentTester tester = new DeploymentTester();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .parallel("us-east-3", "us-west-1")
            .build();
    Application app = tester.createApplication("app1", "tenant1", 1, 11L);

    tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.systemTest);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.stagingTest);

    // Start both parallel production deployments ...
    tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, pkg);
    tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, pkg);

    // ... and complete them in the opposite order of declaration
    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true);
    assertTrue("Change is present as not all jobs are complete",
               tester.applications().require(app.id()).deploying().isPresent());

    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true);
    assertFalse("Change has been deployed",
                tester.applications().require(app.id()).deploying().isPresent());
}
}
public void parallelDeploymentCompletesOutOfOrder() {
    // Verifies that a change stays "deploying" until every job of a <parallel>
    // step has completed, regardless of the order in which they finish.
    DeploymentTester tester = new DeploymentTester();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .parallel("us-east-3", "us-west-1")
            .build();
    Application app = tester.createApplication("app1", "tenant1", 1, 11L);

    tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.systemTest);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.stagingTest);

    // Start both parallel production deployments ...
    tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, pkg);
    tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, pkg);

    // ... and complete them in the opposite order of declaration
    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true);
    assertTrue("Change is present as not all jobs are complete",
               tester.applications().require(app.id()).deploying().isPresent());

    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true);
    assertFalse("Change has been deployed",
                tester.applications().require(app.id()).deploying().isPresent());
}
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1, JobType.productionUsEast3); assertTrue("All jobs consumed", 
tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.buildSystem().takeJobsToRun(); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); 
tester.notifyJobCompletion(JobType.productionUsWest1, application, true); assertTrue("No more jobs triggered at this time", tester.buildSystem().jobs().isEmpty()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.notifyJobCompletion(JobType.productionUsEast3, application, true); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, 
JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
Don't we need to ensure in general that all parallel jobs complete before progressing to the next step?
public void parallelDeploymentCompletesOutOfOrder() {
    // Verifies that a change stays "deploying" until every job of a <parallel>
    // step has completed, regardless of the order in which they finish.
    DeploymentTester tester = new DeploymentTester();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .parallel("us-east-3", "us-west-1")
            .build();
    Application app = tester.createApplication("app1", "tenant1", 1, 11L);

    tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.systemTest);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.stagingTest);

    // Start both parallel production deployments ...
    tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, pkg);
    tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, pkg);

    // ... and complete them in the opposite order of declaration
    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true);
    assertTrue("Change is present as not all jobs are complete",
               tester.applications().require(app.id()).deploying().isPresent());

    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true);
    assertFalse("Change has been deployed",
                tester.applications().require(app.id()).deploying().isPresent());
}
}
public void parallelDeploymentCompletesOutOfOrder() {
    // Verifies that a change stays "deploying" until every job of a <parallel>
    // step has completed, regardless of the order in which they finish.
    DeploymentTester tester = new DeploymentTester();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .parallel("us-east-3", "us-west-1")
            .build();
    Application app = tester.createApplication("app1", "tenant1", 1, 11L);

    tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.systemTest);
    tester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.stagingTest);

    // Start both parallel production deployments ...
    tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, pkg);
    tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, pkg);

    // ... and complete them in the opposite order of declaration
    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true);
    assertTrue("Change is present as not all jobs are complete",
               tester.applications().require(app.id()).deploying().isPresent());

    tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true);
    assertFalse("Change has been deployed",
                tester.applications().require(app.id()).deploying().isPresent());
}
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1, JobType.productionUsEast3); assertTrue("All jobs consumed", 
tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.buildSystem().takeJobsToRun(); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); 
tester.notifyJobCompletion(JobType.productionUsWest1, application, true); assertTrue("No more jobs triggered at this time", tester.buildSystem().jobs().isEmpty()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.notifyJobCompletion(JobType.productionUsEast3, application, true); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, 
JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
Yes, will fix.
/**
 * Verifies that a change deploying to two parallel regions is only marked complete
 * once every region has reported success, regardless of completion order.
 */
public void parallelDeploymentCompletesOutOfOrder() {
    DeploymentTester deploymentTester = new DeploymentTester();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .parallel("us-east-3", "us-west-1")
            .build();
    Application app = deploymentTester.createApplication("app1", "tenant1", 1, 11L);

    // Run the pipeline up to the parallel production step.
    deploymentTester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true);
    deploymentTester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.systemTest);
    deploymentTester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.stagingTest);

    // Deploy both parallel regions, then report completions in the opposite of the
    // declared order (us-west-1 first even though us-east-3 is listed first).
    deploymentTester.deploy(DeploymentJobs.JobType.productionUsWest1, app, pkg);
    deploymentTester.deploy(DeploymentJobs.JobType.productionUsEast3, app, pkg);
    deploymentTester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true);
    assertTrue("Change is present as not all jobs are complete",
               deploymentTester.applications().require(app.id()).deploying().isPresent());
    deploymentTester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true);
    assertFalse("Change has been deployed",
                deploymentTester.applications().require(app.id()).deploying().isPresent());
}
}
/**
 * Verifies that a change deploying to two parallel regions is only marked complete
 * once every region has reported success, regardless of completion order.
 */
public void parallelDeploymentCompletesOutOfOrder() {
    DeploymentTester deploymentTester = new DeploymentTester();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .parallel("us-east-3", "us-west-1")
            .build();
    Application app = deploymentTester.createApplication("app1", "tenant1", 1, 11L);

    // Run the pipeline up to the parallel production step.
    deploymentTester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true);
    deploymentTester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.systemTest);
    deploymentTester.deployAndNotify(app, pkg, true, DeploymentJobs.JobType.stagingTest);

    // Deploy both parallel regions, then report completions in the opposite of the
    // declared order (us-west-1 first even though us-east-3 is listed first).
    deploymentTester.deploy(DeploymentJobs.JobType.productionUsWest1, app, pkg);
    deploymentTester.deploy(DeploymentJobs.JobType.productionUsEast3, app, pkg);
    deploymentTester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true);
    assertTrue("Change is present as not all jobs are complete",
               deploymentTester.applications().require(app.id()).deploying().isPresent());
    deploymentTester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true);
    assertFalse("Change has been deployed",
                deploymentTester.applications().require(app.id()).deploying().isPresent());
}
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1, JobType.productionUsEast3); assertTrue("All jobs consumed", 
tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.buildSystem().takeJobsToRun(); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); 
tester.notifyJobCompletion(JobType.productionUsWest1, application, true); assertTrue("No more jobs triggered at this time", tester.buildSystem().jobs().isEmpty()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.notifyJobCompletion(JobType.productionUsEast3, application, true); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, 
JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
LGTM functionally.
public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).deploying().isPresent()); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true); assertFalse("Change has been deployed", tester.applications().require(app.id()).deploying().isPresent()); }
}
public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsWest1, app, true); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).deploying().isPresent()); tester.notifyJobCompletion(DeploymentJobs.JobType.productionUsEast3, app, true); assertFalse("Change has been deployed", tester.applications().require(app.id()).deploying().isPresent()); }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1, JobType.productionUsEast3); assertTrue("All jobs consumed", 
tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.updateVersionStatus(version); tester.upgrader().maintain(); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.failureRedeployer().maintain(); assertEquals("Retried job", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName()); tester.buildSystem().takeJobsToRun(); assertEquals("Job removed", 0, tester.buildSystem().jobs().size()); tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1))); tester.failureRedeployer().maintain(); assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size()); assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", 
"domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(1)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.clock().advance(Duration.ofSeconds(1)); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerDelayed(); assertEquals(1, buildSystem.jobs().size()); assertEquals(JobType.productionUsWest1.id(), buildSystem.jobs().get(0).jobName()); buildSystem.takeJobsToRun(); 
tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.deploy(JobType.productionUsWest1, application, applicationPackage); tester.notifyJobCompletion(JobType.productionUsWest1, application, true); tester.deploymentTrigger().triggerDelayed(); assertTrue("No more jobs triggered at this time", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerDelayed(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerDelayed(); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildSystem().jobs().size()); assertEquals(JobType.productionUsEast3.id(), tester.buildSystem().jobs().get(0).jobName()); assertEquals(JobType.productionUsWest1.id(), tester.buildSystem().jobs().get(1).jobName()); tester.buildSystem().takeJobsToRun(); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); 
tester.notifyJobCompletion(JobType.productionUsWest1, application, true); assertTrue("No more jobs triggered at this time", tester.buildSystem().jobs().isEmpty()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.notifyJobCompletion(JobType.productionUsEast3, application, true); assertEquals(1, tester.buildSystem().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty()); } @Test @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); BuildSystem buildSystem = tester.buildSystem(); TenantId tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("ap-northeast-1") .build(); tester.notifyJobCompletion(JobType.component, application, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, 
JobType.productionApNortheast1); assertTrue("All jobs consumed", buildSystem.jobs().isEmpty()); } }
how will this interact with custom renderers? do we expect them to use toString or getScore?
private void renderHitContents(Hit hit) throws IOException { String id = hit.getDisplayId(); if (id != null) generator.writeStringField(ID, id); generator.writeFieldName(RELEVANCE); generator.writeNumber(hit.getRelevance().toString()); if (hit.types().size() > 0) { generator.writeArrayFieldStart(TYPES); for (String t : hit.types()) { generator.writeString(t); } generator.writeEndArray(); } String source = hit.getSource(); if (source != null) generator.writeStringField(SOURCE, hit.getSource()); renderSpecialCasesForGrouping(hit); renderAllFields(hit); }
generator.writeNumber(hit.getRelevance().toString());
private void renderHitContents(Hit hit) throws IOException { String id = hit.getDisplayId(); if (id != null) generator.writeStringField(ID, id); generator.writeFieldName(RELEVANCE); generator.writeNumber(hit.getRelevance().toString()); if (hit.types().size() > 0) { generator.writeArrayFieldStart(TYPES); for (String t : hit.types()) { generator.writeString(t); } generator.writeEndArray(); } String source = hit.getSource(); if (source != null) generator.writeStringField(SOURCE, hit.getSource()); renderSpecialCasesForGrouping(hit); renderAllFields(hit); }
class of com.yahoo.search.result.HitGroup, got %s.", list.getClass()); moreChildren(); renderHitGroupHead((HitGroup) list); } protected void moreChildren() throws IOException { if (!renderedChildren.isEmpty()) childrenArray(); renderedChildren.push(0); }
class of com.yahoo.search.result.HitGroup, got %s.", list.getClass()); moreChildren(); renderHitGroupHead((HitGroup) list); } protected void moreChildren() throws IOException { if (!renderedChildren.isEmpty()) childrenArray(); renderedChildren.push(0); }
(as in maybe we also need to change the XML renderer to unbreak the system tests)
private void renderHitContents(Hit hit) throws IOException { String id = hit.getDisplayId(); if (id != null) generator.writeStringField(ID, id); generator.writeFieldName(RELEVANCE); generator.writeNumber(hit.getRelevance().toString()); if (hit.types().size() > 0) { generator.writeArrayFieldStart(TYPES); for (String t : hit.types()) { generator.writeString(t); } generator.writeEndArray(); } String source = hit.getSource(); if (source != null) generator.writeStringField(SOURCE, hit.getSource()); renderSpecialCasesForGrouping(hit); renderAllFields(hit); }
generator.writeNumber(hit.getRelevance().toString());
private void renderHitContents(Hit hit) throws IOException { String id = hit.getDisplayId(); if (id != null) generator.writeStringField(ID, id); generator.writeFieldName(RELEVANCE); generator.writeNumber(hit.getRelevance().toString()); if (hit.types().size() > 0) { generator.writeArrayFieldStart(TYPES); for (String t : hit.types()) { generator.writeString(t); } generator.writeEndArray(); } String source = hit.getSource(); if (source != null) generator.writeStringField(SOURCE, hit.getSource()); renderSpecialCasesForGrouping(hit); renderAllFields(hit); }
class of com.yahoo.search.result.HitGroup, got %s.", list.getClass()); moreChildren(); renderHitGroupHead((HitGroup) list); } protected void moreChildren() throws IOException { if (!renderedChildren.isEmpty()) childrenArray(); renderedChildren.push(0); }
class of com.yahoo.search.result.HitGroup, got %s.", list.getClass()); moreChildren(); renderHitGroupHead((HitGroup) list); } protected void moreChildren() throws IOException { if (!renderedChildren.isEmpty()) childrenArray(); renderedChildren.push(0); }
I think you can just catch Throwable instead. In the (not very likely) event that a non-error throwable is thrown, the thread will die silently.
public void run() { if (!request.validateParameters()) { log.log(LogLevel.WARNING, "Parameters for request " + request + " did not validate: " + request.errorCode() + " : " + request.errorMessage()); respond(request); return; } Trace trace = request.getRequestTrace(); if (logDebug(trace)) { debugLog(trace, "GetConfigProcessor.run() on " + localHostName); } Optional<TenantName> tenant = rpcServer.resolveTenant(request, trace); if (rpcServer.isHostedVespa() && rpcServer.allTenantsLoaded() && !tenant.isPresent() && isSentinelConfigRequest(request)) { returnEmpty(request); return; } GetConfigContext context = rpcServer.createGetConfigContext(tenant, request, trace); if (context == null || ! context.requestHandler().hasApplication(context.applicationId(), Optional.<Version>empty())) { handleError(request, ErrorCode.APPLICATION_NOT_LOADED, "No application exists"); return; } Optional<Version> vespaVersion = rpcServer.useRequestVersion() ? request.getVespaVersion().map(VespaVersion::toString).map(Version::fromString) : Optional.empty(); if (logDebug(trace)) { debugLog(trace, "Using version " + getPrintableVespaVersion(vespaVersion)); } if ( ! 
context.requestHandler().hasApplication(context.applicationId(), vespaVersion)) { handleError(request, ErrorCode.UNKNOWN_VESPA_VERSION, "Unknown Vespa version in request: " + getPrintableVespaVersion(vespaVersion)); return; } this.logPre = Tenants.logPre(context.applicationId()); ConfigResponse config; try { config = rpcServer.resolveConfig(request, context, vespaVersion); } catch (UnknownConfigDefinitionException e) { handleError(request, ErrorCode.UNKNOWN_DEFINITION, "Unknown config definition " + request.getConfigKey()); return; } catch (UnknownConfigIdException e) { handleError(request, ErrorCode.ILLEGAL_CONFIGID, "Illegal config id " + request.getConfigKey().getConfigId()); return; } catch (Exception | Error e) { log.log(Level.SEVERE, "Unexpected error handling config request", e); handleError(request, ErrorCode.INTERNAL_ERROR, "Internal error " + e.getMessage()); return; } if ((config != null) && (!config.hasEqualConfig(request) || config.hasNewerGeneration(request) || forceResponse)) { request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); if (logDebug(trace)) { debugLog(trace, "return response: " + request.getShortDescription()); } respond(request); } else { if (logDebug(trace)) { debugLog(trace, "delaying response " + request.getShortDescription()); } rpcServer.delayResponse(request, context); } }
} catch (Exception | Error e) {
public void run() { if (!request.validateParameters()) { log.log(LogLevel.WARNING, "Parameters for request " + request + " did not validate: " + request.errorCode() + " : " + request.errorMessage()); respond(request); return; } Trace trace = request.getRequestTrace(); if (logDebug(trace)) { debugLog(trace, "GetConfigProcessor.run() on " + localHostName); } Optional<TenantName> tenant = rpcServer.resolveTenant(request, trace); if (rpcServer.isHostedVespa() && rpcServer.allTenantsLoaded() && !tenant.isPresent() && isSentinelConfigRequest(request)) { returnEmpty(request); return; } GetConfigContext context = rpcServer.createGetConfigContext(tenant, request, trace); if (context == null || ! context.requestHandler().hasApplication(context.applicationId(), Optional.<Version>empty())) { handleError(request, ErrorCode.APPLICATION_NOT_LOADED, "No application exists"); return; } Optional<Version> vespaVersion = rpcServer.useRequestVersion() ? request.getVespaVersion().map(VespaVersion::toString).map(Version::fromString) : Optional.empty(); if (logDebug(trace)) { debugLog(trace, "Using version " + getPrintableVespaVersion(vespaVersion)); } if ( ! 
context.requestHandler().hasApplication(context.applicationId(), vespaVersion)) { handleError(request, ErrorCode.UNKNOWN_VESPA_VERSION, "Unknown Vespa version in request: " + getPrintableVespaVersion(vespaVersion)); return; } this.logPre = Tenants.logPre(context.applicationId()); ConfigResponse config; try { config = rpcServer.resolveConfig(request, context, vespaVersion); } catch (UnknownConfigDefinitionException e) { handleError(request, ErrorCode.UNKNOWN_DEFINITION, "Unknown config definition " + request.getConfigKey()); return; } catch (UnknownConfigIdException e) { handleError(request, ErrorCode.ILLEGAL_CONFIGID, "Illegal config id " + request.getConfigKey().getConfigId()); return; } catch (Throwable e) { log.log(Level.SEVERE, "Unexpected error handling config request", e); handleError(request, ErrorCode.INTERNAL_ERROR, "Internal error " + e.getMessage()); return; } if ((config != null) && (!config.hasEqualConfig(request) || config.hasNewerGeneration(request) || forceResponse)) { request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); if (logDebug(trace)) { debugLog(trace, "return response: " + request.getShortDescription()); } respond(request); } else { if (logDebug(trace)) { debugLog(trace, "delaying response " + request.getShortDescription()); } rpcServer.delayResponse(request, context); } }
class GetConfigProcessor implements Runnable { private static final Logger log = Logger.getLogger(GetConfigProcessor.class.getName()); private static final String localHostName = HostName.getLocalhost(); private final JRTServerConfigRequest request; /* True only when this request has expired its server timeout and we need to respond to the client */ private boolean forceResponse = false; private final RpcServer rpcServer; private String logPre = ""; GetConfigProcessor(RpcServer rpcServer, JRTServerConfigRequest request, boolean forceResponse) { this.rpcServer = rpcServer; this.request = request; this.forceResponse = forceResponse; } private void respond(JRTServerConfigRequest request) { final Request req = request.getRequest(); if (req.isError()) { Level logLevel = (req.errorCode() == ErrorCode.APPLICATION_NOT_LOADED) ? LogLevel.DEBUG : LogLevel.INFO; log.log(logLevel, logPre + req.errorMessage()); } rpcServer.respond(request); } private void handleError(JRTServerConfigRequest request, int errorCode, String message) { String target = "(unknown)"; try { target = request.getRequest().target().toString(); } catch (IllegalStateException e) { } request.addErrorResponse(errorCode, logPre + "Failed request (" + message + ") from " + target); respond(request); } private boolean isSentinelConfigRequest(JRTServerConfigRequest request) { return request.getConfigKey().getName().equals(SentinelConfig.getDefName()) && request.getConfigKey().getNamespace().equals(SentinelConfig.getDefNamespace()); } private static String getPrintableVespaVersion(Optional<Version> vespaVersion) { return (vespaVersion.isPresent() ? 
vespaVersion.get().toString() : "LATEST"); } private void returnEmpty(JRTServerConfigRequest request) { ConfigPayload emptyPayload = ConfigPayload.empty(); String configMd5 = ConfigUtils.getMd5(emptyPayload); ConfigResponse config = SlimeConfigResponse.fromConfigPayload(emptyPayload, null, 0, configMd5); request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); respond(request); } static boolean logDebug(Trace trace) { return trace.shouldTrace(RpcServer.TRACELEVEL_DEBUG) || log.isLoggable(LogLevel.DEBUG); } private void debugLog(Trace trace, String message) { if (logDebug(trace)) { log.log(LogLevel.DEBUG, logPre + message); trace.trace(RpcServer.TRACELEVEL_DEBUG, logPre + message); } } }
class GetConfigProcessor implements Runnable { private static final Logger log = Logger.getLogger(GetConfigProcessor.class.getName()); private static final String localHostName = HostName.getLocalhost(); private final JRTServerConfigRequest request; /* True only when this request has expired its server timeout and we need to respond to the client */ private boolean forceResponse = false; private final RpcServer rpcServer; private String logPre = ""; GetConfigProcessor(RpcServer rpcServer, JRTServerConfigRequest request, boolean forceResponse) { this.rpcServer = rpcServer; this.request = request; this.forceResponse = forceResponse; } private void respond(JRTServerConfigRequest request) { final Request req = request.getRequest(); if (req.isError()) { Level logLevel = (req.errorCode() == ErrorCode.APPLICATION_NOT_LOADED) ? LogLevel.DEBUG : LogLevel.INFO; log.log(logLevel, logPre + req.errorMessage()); } rpcServer.respond(request); } private void handleError(JRTServerConfigRequest request, int errorCode, String message) { String target = "(unknown)"; try { target = request.getRequest().target().toString(); } catch (IllegalStateException e) { } request.addErrorResponse(errorCode, logPre + "Failed request (" + message + ") from " + target); respond(request); } private boolean isSentinelConfigRequest(JRTServerConfigRequest request) { return request.getConfigKey().getName().equals(SentinelConfig.getDefName()) && request.getConfigKey().getNamespace().equals(SentinelConfig.getDefNamespace()); } private static String getPrintableVespaVersion(Optional<Version> vespaVersion) { return (vespaVersion.isPresent() ? 
vespaVersion.get().toString() : "LATEST"); } private void returnEmpty(JRTServerConfigRequest request) { ConfigPayload emptyPayload = ConfigPayload.empty(); String configMd5 = ConfigUtils.getMd5(emptyPayload); ConfigResponse config = SlimeConfigResponse.fromConfigPayload(emptyPayload, null, 0, configMd5); request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); respond(request); } static boolean logDebug(Trace trace) { return trace.shouldTrace(RpcServer.TRACELEVEL_DEBUG) || log.isLoggable(LogLevel.DEBUG); } private void debugLog(Trace trace, String message) { if (logDebug(trace)) { log.log(LogLevel.DEBUG, logPre + message); trace.trace(RpcServer.TRACELEVEL_DEBUG, logPre + message); } } }
fixed
public void run() { if (!request.validateParameters()) { log.log(LogLevel.WARNING, "Parameters for request " + request + " did not validate: " + request.errorCode() + " : " + request.errorMessage()); respond(request); return; } Trace trace = request.getRequestTrace(); if (logDebug(trace)) { debugLog(trace, "GetConfigProcessor.run() on " + localHostName); } Optional<TenantName> tenant = rpcServer.resolveTenant(request, trace); if (rpcServer.isHostedVespa() && rpcServer.allTenantsLoaded() && !tenant.isPresent() && isSentinelConfigRequest(request)) { returnEmpty(request); return; } GetConfigContext context = rpcServer.createGetConfigContext(tenant, request, trace); if (context == null || ! context.requestHandler().hasApplication(context.applicationId(), Optional.<Version>empty())) { handleError(request, ErrorCode.APPLICATION_NOT_LOADED, "No application exists"); return; } Optional<Version> vespaVersion = rpcServer.useRequestVersion() ? request.getVespaVersion().map(VespaVersion::toString).map(Version::fromString) : Optional.empty(); if (logDebug(trace)) { debugLog(trace, "Using version " + getPrintableVespaVersion(vespaVersion)); } if ( ! 
context.requestHandler().hasApplication(context.applicationId(), vespaVersion)) { handleError(request, ErrorCode.UNKNOWN_VESPA_VERSION, "Unknown Vespa version in request: " + getPrintableVespaVersion(vespaVersion)); return; } this.logPre = Tenants.logPre(context.applicationId()); ConfigResponse config; try { config = rpcServer.resolveConfig(request, context, vespaVersion); } catch (UnknownConfigDefinitionException e) { handleError(request, ErrorCode.UNKNOWN_DEFINITION, "Unknown config definition " + request.getConfigKey()); return; } catch (UnknownConfigIdException e) { handleError(request, ErrorCode.ILLEGAL_CONFIGID, "Illegal config id " + request.getConfigKey().getConfigId()); return; } catch (Exception | Error e) { log.log(Level.SEVERE, "Unexpected error handling config request", e); handleError(request, ErrorCode.INTERNAL_ERROR, "Internal error " + e.getMessage()); return; } if ((config != null) && (!config.hasEqualConfig(request) || config.hasNewerGeneration(request) || forceResponse)) { request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); if (logDebug(trace)) { debugLog(trace, "return response: " + request.getShortDescription()); } respond(request); } else { if (logDebug(trace)) { debugLog(trace, "delaying response " + request.getShortDescription()); } rpcServer.delayResponse(request, context); } }
} catch (Exception | Error e) {
public void run() { if (!request.validateParameters()) { log.log(LogLevel.WARNING, "Parameters for request " + request + " did not validate: " + request.errorCode() + " : " + request.errorMessage()); respond(request); return; } Trace trace = request.getRequestTrace(); if (logDebug(trace)) { debugLog(trace, "GetConfigProcessor.run() on " + localHostName); } Optional<TenantName> tenant = rpcServer.resolveTenant(request, trace); if (rpcServer.isHostedVespa() && rpcServer.allTenantsLoaded() && !tenant.isPresent() && isSentinelConfigRequest(request)) { returnEmpty(request); return; } GetConfigContext context = rpcServer.createGetConfigContext(tenant, request, trace); if (context == null || ! context.requestHandler().hasApplication(context.applicationId(), Optional.<Version>empty())) { handleError(request, ErrorCode.APPLICATION_NOT_LOADED, "No application exists"); return; } Optional<Version> vespaVersion = rpcServer.useRequestVersion() ? request.getVespaVersion().map(VespaVersion::toString).map(Version::fromString) : Optional.empty(); if (logDebug(trace)) { debugLog(trace, "Using version " + getPrintableVespaVersion(vespaVersion)); } if ( ! 
context.requestHandler().hasApplication(context.applicationId(), vespaVersion)) { handleError(request, ErrorCode.UNKNOWN_VESPA_VERSION, "Unknown Vespa version in request: " + getPrintableVespaVersion(vespaVersion)); return; } this.logPre = Tenants.logPre(context.applicationId()); ConfigResponse config; try { config = rpcServer.resolveConfig(request, context, vespaVersion); } catch (UnknownConfigDefinitionException e) { handleError(request, ErrorCode.UNKNOWN_DEFINITION, "Unknown config definition " + request.getConfigKey()); return; } catch (UnknownConfigIdException e) { handleError(request, ErrorCode.ILLEGAL_CONFIGID, "Illegal config id " + request.getConfigKey().getConfigId()); return; } catch (Throwable e) { log.log(Level.SEVERE, "Unexpected error handling config request", e); handleError(request, ErrorCode.INTERNAL_ERROR, "Internal error " + e.getMessage()); return; } if ((config != null) && (!config.hasEqualConfig(request) || config.hasNewerGeneration(request) || forceResponse)) { request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); if (logDebug(trace)) { debugLog(trace, "return response: " + request.getShortDescription()); } respond(request); } else { if (logDebug(trace)) { debugLog(trace, "delaying response " + request.getShortDescription()); } rpcServer.delayResponse(request, context); } }
class GetConfigProcessor implements Runnable { private static final Logger log = Logger.getLogger(GetConfigProcessor.class.getName()); private static final String localHostName = HostName.getLocalhost(); private final JRTServerConfigRequest request; /* True only when this request has expired its server timeout and we need to respond to the client */ private boolean forceResponse = false; private final RpcServer rpcServer; private String logPre = ""; GetConfigProcessor(RpcServer rpcServer, JRTServerConfigRequest request, boolean forceResponse) { this.rpcServer = rpcServer; this.request = request; this.forceResponse = forceResponse; } private void respond(JRTServerConfigRequest request) { final Request req = request.getRequest(); if (req.isError()) { Level logLevel = (req.errorCode() == ErrorCode.APPLICATION_NOT_LOADED) ? LogLevel.DEBUG : LogLevel.INFO; log.log(logLevel, logPre + req.errorMessage()); } rpcServer.respond(request); } private void handleError(JRTServerConfigRequest request, int errorCode, String message) { String target = "(unknown)"; try { target = request.getRequest().target().toString(); } catch (IllegalStateException e) { } request.addErrorResponse(errorCode, logPre + "Failed request (" + message + ") from " + target); respond(request); } private boolean isSentinelConfigRequest(JRTServerConfigRequest request) { return request.getConfigKey().getName().equals(SentinelConfig.getDefName()) && request.getConfigKey().getNamespace().equals(SentinelConfig.getDefNamespace()); } private static String getPrintableVespaVersion(Optional<Version> vespaVersion) { return (vespaVersion.isPresent() ? 
vespaVersion.get().toString() : "LATEST"); } private void returnEmpty(JRTServerConfigRequest request) { ConfigPayload emptyPayload = ConfigPayload.empty(); String configMd5 = ConfigUtils.getMd5(emptyPayload); ConfigResponse config = SlimeConfigResponse.fromConfigPayload(emptyPayload, null, 0, configMd5); request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); respond(request); } static boolean logDebug(Trace trace) { return trace.shouldTrace(RpcServer.TRACELEVEL_DEBUG) || log.isLoggable(LogLevel.DEBUG); } private void debugLog(Trace trace, String message) { if (logDebug(trace)) { log.log(LogLevel.DEBUG, logPre + message); trace.trace(RpcServer.TRACELEVEL_DEBUG, logPre + message); } } }
class GetConfigProcessor implements Runnable { private static final Logger log = Logger.getLogger(GetConfigProcessor.class.getName()); private static final String localHostName = HostName.getLocalhost(); private final JRTServerConfigRequest request; /* True only when this request has expired its server timeout and we need to respond to the client */ private boolean forceResponse = false; private final RpcServer rpcServer; private String logPre = ""; GetConfigProcessor(RpcServer rpcServer, JRTServerConfigRequest request, boolean forceResponse) { this.rpcServer = rpcServer; this.request = request; this.forceResponse = forceResponse; } private void respond(JRTServerConfigRequest request) { final Request req = request.getRequest(); if (req.isError()) { Level logLevel = (req.errorCode() == ErrorCode.APPLICATION_NOT_LOADED) ? LogLevel.DEBUG : LogLevel.INFO; log.log(logLevel, logPre + req.errorMessage()); } rpcServer.respond(request); } private void handleError(JRTServerConfigRequest request, int errorCode, String message) { String target = "(unknown)"; try { target = request.getRequest().target().toString(); } catch (IllegalStateException e) { } request.addErrorResponse(errorCode, logPre + "Failed request (" + message + ") from " + target); respond(request); } private boolean isSentinelConfigRequest(JRTServerConfigRequest request) { return request.getConfigKey().getName().equals(SentinelConfig.getDefName()) && request.getConfigKey().getNamespace().equals(SentinelConfig.getDefNamespace()); } private static String getPrintableVespaVersion(Optional<Version> vespaVersion) { return (vespaVersion.isPresent() ? 
vespaVersion.get().toString() : "LATEST"); } private void returnEmpty(JRTServerConfigRequest request) { ConfigPayload emptyPayload = ConfigPayload.empty(); String configMd5 = ConfigUtils.getMd5(emptyPayload); ConfigResponse config = SlimeConfigResponse.fromConfigPayload(emptyPayload, null, 0, configMd5); request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.getConfigMd5()); respond(request); } static boolean logDebug(Trace trace) { return trace.shouldTrace(RpcServer.TRACELEVEL_DEBUG) || log.isLoggable(LogLevel.DEBUG); } private void debugLog(Trace trace, String message) { if (logDebug(trace)) { log.log(LogLevel.DEBUG, logPre + message); trace.trace(RpcServer.TRACELEVEL_DEBUG, logPre + message); } } }
Is this required when `running.set(false)` is always invoked in the `finally` block below?
public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); synchronized (monitor) { monitor.notifyAll(); } } }
synchronized (monitor) { running.set(false); }
public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); synchronized (monitor) { monitor.notifyAll(); } } }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override 
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, 
timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); 
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); stateChangeHandler.handleAllDistributorsInSync( stateVersionTracker.getVersionedClusterState(), nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". 
This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: 
").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { 
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest("Processing remote task " + task.getClass().getName()); task.doRemoteFleetControllerTask(context); task.notifyCompleted(); log.finest("Done processing remote task " + task.getClass().getName()); return true; } return false; } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. 
*/
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        // While the cluster is up, the published state is returned as-is.
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        // Otherwise return the latest candidate state, but keep the published version number.
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /*
      System test observations:
      - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
      - long time before content node state convergence (though this seems to be the case for legacy impl as well)
     */

    /**
     * Refreshes locally cached cluster info: wanted states/start timestamps from the database
     * (for non-masters, once every 100 cycles), slobrok lookup, node state gathering, timer
     * watching and, if required, recomputation of the cluster state.
     * Returns true if any work was done.
     */
    private boolean resyncLocallyCachedState() throws InterruptedException {
        boolean didWork = false;
        if ( ! isMaster && cycleCount % 100 == 0) {
            didWork = database.loadWantedStates(databaseContext);
            didWork |= database.loadStartTimestamps(cluster);
        }
        didWork |= nodeLookup.updateCluster(cluster, this);
        didWork |= stateGatherer.sendMessages(cluster, communicator, this);
        didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
        didWork |= recomputeClusterStateIfRequired();
        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            }
        }
        isStateGatherer = true;
        return didWork;
    }

    /**
     * Recomputes the candidate cluster state if anything may have changed, and promotes it to
     * a new versioned state when it differs enough from the current one or a new version was
     * observed in ZooKeeper. Returns true if a new versioned state was published.
     */
    private boolean recomputeClusterStateIfRequired() {
        if (mustRecomputeCandidateClusterState()) {
            stateChangeHandler.unsetStateChangedFlag();
            final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
            stateVersionTracker.updateLatestCandidateState(candidate);
            if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
            {
                final long timeNowMs = timer.getCurrentTimeInMillis();
                final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();

                stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
                emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
                handleNewSystemState(stateVersionTracker.getVersionedClusterState());
                return true;
            }
        }
        return false;
    }

    /** Generates an annotated cluster state from the current options, cluster info and time. */
    private AnnotatedClusterState computeCurrentAnnotatedState() {
        ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
        params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
              .cluster(cluster)
              .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
        return ClusterStateGenerator.generatedStateFrom(params);
    }

    /** Logs the delta events between the two annotated states, plus the state-applied summary events. */
    private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
                                                final AnnotatedClusterState toState,
                                                final long timeNowMs) {
        final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
                EventDiffCalculator.params()
                        .cluster(cluster)
                        .fromState(fromState)
                        .toState(toState)
                        .currentTimeMs(timeNowMs));
        for (Event event : deltaEvents) {
            eventLog.add(event, isMaster);
        }

        emitStateAppliedEvents(timeNowMs,
fromState.getClusterState(), toState.getClusterState());
    }

    /** Logs a summary event for the applied state change, plus one if the distribution bit count changed. */
    private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState),
                timeNowMs), isMaster);

        if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
            eventLog.add(new ClusterEvent(
                    ClusterEvent.Type.SYSTEMSTATE,
                    "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(),
                    timeNowMs), isMaster);
        }
    }

    /** True if the candidate state must be recomputed: something changed locally or a new version appeared in ZooKeeper. */
    private boolean mustRecomputeCandidateClusterState() {
        return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
    }

    /**
     * Handles gaining or losing mastership. On becoming master, reloads versions/timestamps/wanted
     * states from the database and delays the first state broadcast; on losing mastership, resets
     * broadcast and metrics bookkeeping. Returns true if any work was done.
     */
    private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
        boolean didWork = false;
        if (masterElectionHandler.isMaster()) {
            if ( ! isMaster) {
                // Edge: this node just became master.
                metricUpdater.becameMaster();
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                didWork = database.loadStartTimestamps(cluster);
                didWork |= database.loadWantedStates(databaseContext);
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                        + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
                long currentTime = timer.getCurrentTimeInMillis();
                firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
                log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
                        + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            }
            isMaster = true;
            if (wantedStateChanged) {
                database.saveWantedStates(databaseContext);
                wantedStateChanged = false;
            }
        } else {
            if (isMaster) {
                // Edge: this node just lost mastership.
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
                firstAllowedStateBroadcast = Long.MAX_VALUE;
                metricUpdater.noLongerMaster();
            }
            wantedStateChanged = false;
            isMaster = false;
        }
        return didWork;
    }

    /** Context exposing this controller's cluster and listeners to the database layer. */
    public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
        @Override
        public ContentCluster getCluster() { return cluster; }
        @Override
        public FleetController getFleetController() { return FleetController.this; }
        @Override
        public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
        @Override
        public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
    };

    /** Blocks until at least one full tick cycle completes; throws on timeout or if the controller stops. */
    public void waitForCompleteCycle(long timeoutMS) {
        long endTime = System.currentTimeMillis() + timeoutMS;
        synchronized (monitor) {
            // If a cycle is currently in progress we must wait for the one after it to finish.
            long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
            waitingForCycle = true;
            try{
                while (cycleCount < wantedCycle) {
                    if (System.currentTimeMillis() > endTime)
                        throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                    if ( !isRunning() )
                        throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                    try{ monitor.wait(100); } catch (InterruptedException e) {}
                }
            } finally {
                waitingForCycle = false;
            }
        }
    }

    /**
     * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
     * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
     * live performance to remove a non-problem.
     */
    public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeout;
        synchronized (monitor) {
            while (true) {
                int ackedNodes = 0;
                for (NodeInfo node : cluster.getNodeInfo()) {
                    if (node.getSystemStateVersionAcknowledged() >= version) {
                        ++ackedNodes;
                    }
                }
                if (ackedNodes >= nodeCount) {
                    log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                    return;
                }
                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
                }
                monitor.wait(10);
            }
        }
    }

    /** Blocks until the given numbers of distributors and storage nodes are registered in slobrok, or throws on timeout. */
    public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeoutMillis;
        synchronized (monitor) {
            while (true) {
                int distCount = 0, storCount = 0;
                for (NodeInfo info : cluster.getNodeInfo()) {
                    if (!info.isRpcAddressOutdated()) {
                        if (info.isDistributor()) ++distCount;
                        else ++storCount;
                    }
                }
                if (distCount == distNodeCount && storCount == storNodeCount) return;

                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                            + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount
                            + " distributors and " + storCount + " storage nodes)");
                }
                monitor.wait(10);
            }
        }
    }

    public boolean hasZookeeperConnection() { return !database.isClosed(); }

    // Simple accessors.
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
    public ContentCluster getCluster() { return cluster; }
    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
    public EventLog getEventLog() { return eventLog; }
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
                                 Runnable, RemoteClusterControllerTaskScheduler {

    private static Logger log = Logger.getLogger(FleetController.class.getName());

    private final Timer timer;
    // Shared lock for controller state; assigned the timer instance in the constructor.
    private final Object monitor;
    private final EventLog eventLog;
    private final NodeLookup nodeLookup;
    private final ContentCluster cluster;
    private final Communicator communicator;
    private final NodeStateGatherer stateGatherer;
    private final StateChangeHandler stateChangeHandler;
    private final SystemStateBroadcaster systemStateBroadcaster;
    private final StateVersionTracker stateVersionTracker;
    private final StatusPageServerInterface statusPageServer;
    private final RpcServer rpcServer;
    private final DatabaseHandler database;
    private final MasterElectionHandler masterElectionHandler;
    private Thread runner = null;
    private AtomicBoolean running = new AtomicBoolean(true);
    private FleetControllerOptions options;
    // Pending options, applied at the start of the next tick cycle (see updateOptions()).
    private FleetControllerOptions nextOptions;
    private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
    private boolean processingCycle = false;
    private boolean wantedStateChanged = false;
    private long cycleCount = 0;
    private long nextStateSendTime = 0;
    private Long controllerThreadId = null;

    private boolean waitingForCycle = false;
    private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
    // States queued for delivery to listeners at the end of a tick.
    private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>();
    private long configGeneration = -1;
    private long nextConfigGeneration = -1;
    private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
    private final MetricUpdater metricUpdater;

    private boolean isMaster = false;
    private boolean isStateGatherer = false;
    private long firstAllowedStateBroadcast = Long.MAX_VALUE;
    private long tickStartTime = Long.MAX_VALUE;

    /** Read-only view of runtime data, used by the status page handlers. */
    private final RunDataExtractor dataExtractor = new RunDataExtractor() {
        @Override
        public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
        @Override
        public FleetControllerOptions getOptions() { return options; }
        @Override
        public long getConfigGeneration() { return configGeneration; }
        @Override
        public ContentCluster getCluster() { return cluster; }
    };

    public FleetController(Timer timer,
                           EventLog eventLog,
                           ContentCluster cluster,
                           NodeStateGatherer nodeStateGatherer,
                           Communicator communicator,
                           StatusPageServerInterface statusPage,
                           RpcServer server,
                           NodeLookup nodeLookup,
                           DatabaseHandler database,
                           StateChangeHandler stateChangeHandler,
                           SystemStateBroadcaster systemStateBroadcaster,
                           MasterElectionHandler masterElectionHandler,
                           MetricUpdater metricUpdater,
                           FleetControllerOptions options) throws Exception {
        log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
        this.timer = timer;
        this.monitor = timer;
        this.eventLog = eventLog;
        this.options = options;
        this.nodeLookup = nodeLookup;
        this.cluster = cluster;
        this.communicator = communicator;
        this.database = database;
        this.stateGatherer = nodeStateGatherer;
        this.stateChangeHandler = stateChangeHandler;
        this.systemStateBroadcaster = systemStateBroadcaster;
        this.stateVersionTracker = new StateVersionTracker(metricUpdater);
        this.metricUpdater = metricUpdater;

        this.statusPageServer = statusPage;
        this.rpcServer = server;

        this.masterElectionHandler = masterElectionHandler;

        // Status page routing: per-node page, health state, raw cluster state, and index page.
        this.statusRequestRouter.addHandler(
                "^/node=([a-z]+)\\.(\\d+)$",
                new LegacyNodePageRequestHandler(timer, eventLog, cluster));
        this.statusRequestRouter.addHandler(
                "^/state.*",
                new NodeHealthRequestHandler(dataExtractor));
        this.statusRequestRouter.addHandler(
                "^/clusterstate",
                new ClusterStateRequestHandler(stateVersionTracker));
        this.statusRequestRouter.addHandler(
                "^/$",
                new LegacyIndexPageRequestHandler(
                        timer, options.showLocalSystemStatesInEventLog, cluster,
                        masterElectionHandler, stateVersionTracker, eventLog,
timer.getCurrentTimeInMillis(), dataExtractor));

        propagateOptions();
    }

    /** Creates and starts a controller using the given status page server (container deployment). */
    public static FleetController createForContainer(FleetControllerOptions options,
                                                     StatusPageServerInterface statusPageServer,
                                                     MetricReporter metricReporter) throws Exception {
        Timer timer = new RealTimer();
        return create(options, timer, statusPageServer, null, metricReporter);
    }

    /** Creates and starts a standalone controller with its own RPC and HTTP status servers. */
    public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
        Timer timer = new RealTimer();
        RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
        StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort);
        return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter());
    }

    /** Wires up all collaborators, constructs the controller and starts its event thread. */
    private static FleetController create(FleetControllerOptions options,
                                          Timer timer,
                                          StatusPageServerInterface statusPageServer,
                                          RpcServer rpcServer,
                                          MetricReporter metricReporter) throws Exception {
        MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
        EventLog log = new EventLog(timer, metricUpdater);
        ContentCluster cluster = new ContentCluster(
                options.clusterName,
                options.nodes,
                options.storageDistribution,
                options.minStorageNodesUp,
                options.minRatioOfStorageNodesUp);
        NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
        Communicator communicator = new RPCCommunicator(
                timer,
                options.fleetControllerIndex,
                options.nodeStateRequestTimeoutMS,
                options.nodeStateRequestTimeoutEarliestPercentage,
                options.nodeStateRequestTimeoutLatestPercentage,
                options.nodeStateRequestRoundTripTimeMaxSeconds);
        DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
        NodeLookup lookUp = new SlobrokClient(timer);
        StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
        SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
        MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
        FleetController controller = new FleetController(
                timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database,
                stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
        controller.start();
        return controller;
    }

    /** Starts the controller's event thread. */
    public void start() {
        runner = new Thread(this);
        runner.start();
    }

    public Object getMonitor() { return monitor; }

    public boolean isRunning() {
        return running.get();
    }

    public boolean isMaster() {
        synchronized (monitor) {
            return masterElectionHandler.isMaster();
        }
    }

    public ClusterState getClusterState() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterState();
        }
    }

    /** Queues a remote task; it is executed by the controller thread in a later tick. */
    public void schedule(RemoteClusterControllerTask task) {
        synchronized (monitor) {
            log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
            remoteTasks.add(task);
        }
    }

    /** Used for unit testing.
*/
    public void addSystemStateListener(SystemStateListener listener) {
        synchronized (systemStateListeners) {
            systemStateListeners.add(listener);
            // Deliver the current state to the new listener right away.
            com.yahoo.vdslib.state.ClusterState state = getSystemState();
            if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
            listener.handleNewSystemState(state);
        }
    }

    /** Returns a copy of the current options. */
    public FleetControllerOptions getOptions() {
        synchronized(monitor) {
            return options.clone();
        }
    }

    public NodeState getReportedNodeState(Node n) {
        synchronized(monitor) {
            NodeInfo node = cluster.getNodeInfo(n);
            if (node == null) {
                throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
            }
            return node.getReportedState();
        }
    }

    public NodeState getWantedNodeState(Node n) {
        synchronized(monitor) {
            return cluster.getNodeInfo(n).getWantedState();
        }
    }

    public com.yahoo.vdslib.state.ClusterState getSystemState() {
        synchronized(monitor) {
            return stateVersionTracker.getVersionedClusterState();
        }
    }

    public int getHttpPort() { return statusPageServer.getPort(); }
    public int getRpcPort() { return rpcServer.getPort(); }

    /** Stops the event thread, then shuts down the database, servers and connections. */
    public void shutdown() throws InterruptedException, java.io.IOException {
        if (runner != null && isRunning()) {
            log.log(LogLevel.INFO, "Joining event thread.");
            running.set(false);
            synchronized(monitor) { monitor.notifyAll(); }
            runner.join();
        }
        log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
        // NOTE(review): presumably adopted so verifyInControllerThread() accepts this thread during teardown — confirm.
        controllerThreadId = Thread.currentThread().getId();
        database.shutdown(this);

        if (statusPageServer != null) {
            statusPageServer.shutdown();
        }
        if (rpcServer != null) {
            rpcServer.shutdown();
        }

        communicator.shutdown();
        nodeLookup.shutdown();
    }

    /** Installs new options; they take effect at the start of the next tick cycle. */
    public void updateOptions(FleetControllerOptions options, long configGeneration) {
        synchronized(monitor) {
            assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
            log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
            nextOptions = options.clone();
            nextConfigGeneration = configGeneration;
            monitor.notifyAll();
        }
    }

    /** Throws unless called from the controller thread (or before that thread id is known). */
    private void verifyInControllerThread() {
        if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
            throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
        }
    }

    private ClusterState latestCandidateClusterState() {
        return stateVersionTracker.getLatestCandidateState().getClusterState();
    }

    @Override
    public void handleNewNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
    }

    @Override
    public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        // Persisted to the database by the next leadership-edge handling pass.
        wantedStateChanged = true;
        stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
    }

    @Override
    public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
        verifyInControllerThread();
        stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
    }

    @Override
    public void handleNewNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewNode(node);
    }

    @Override
    public void handleMissingNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
    }

    @Override
    public void handleNewRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewRpcAddress(node);
    }

    @Override
    public void handleReturnedRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleReturnedRpcAddress(node);
    }

    /** Records a newly versioned state for listener delivery, metrics, broadcast and (if master) persistence. */
    public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
        verifyInControllerThread();
        newStates.add(state);
        metricUpdater.updateClusterStateMetrics(cluster, state);
        systemStateBroadcaster.handleNewSystemState(state);
        if (masterElectionHandler.isMaster()) {
            storeClusterStateVersionToZooKeeper(state);
        }
    }

    private
void storeClusterStateVersionToZooKeeper(ClusterState state) {
        try {
            database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
        } catch (InterruptedException e) {
            // Wrapped as unchecked: this private helper does not declare InterruptedException.
            throw new RuntimeException("ZooKeeper write interrupted", e);
        }
    }

    /**
     * This function gives data of the current state in master election.
     * The keys in the given map are indexes of fleet controllers.
     * The values are what fleetcontroller that fleetcontroller wants to
     * become master.
     *
     * If more than half the fleetcontrollers want a node to be master and
     * that node also wants itself as master, that node is the single master.
     * If this condition is not met, there is currently no master.
     */
    public void handleFleetData(Map<Integer, Integer> data) {
        verifyInControllerThread();
        log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
        metricUpdater.updateMasterElectionMetrics(data);
        masterElectionHandler.handleFleetData(data);
    }

    /**
     * Called when we can no longer contact database.
     */
    public void lostDatabaseConnection() {
        verifyInControllerThread();
        masterElectionHandler.lostDatabaseConnection();
    }

    /** Called when all distributors have acked newest cluster state version. */
    public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
        Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
        stateChangeHandler.handleAllDistributorsInSync(stateVersionTracker.getVersionedClusterState(), nodes, database, context);
    }

    /** True if the new node set differs from the currently configured one in membership or retired flag. */
    private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
        if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
        if ( ! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;

        // containsAll() is not sufficient: also compare each node's retired flag
        // (presumably not part of ConfiguredNode equality — confirm).
        for (ConfiguredNode node : newNodes) {
            if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true;
        }

        return false;
    }

    /** This is called when the options field has been set to a new set of options */
    private void propagateOptions() throws java.io.IOException, ListenFailedException {
        verifyInControllerThread();

        if (changesConfiguredNodeSet(options.nodes)) {
            cluster.setSlobrokGenerationCount(0);
        }

        communicator.propagateOptions(options);

        if (nodeLookup instanceof SlobrokClient)
            ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
        eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
        cluster.setPollingFrequency(options.statePollingFrequency);
        cluster.setDistribution(options.storageDistribution);
        cluster.setNodes(options.nodes);
        cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
        cluster.setMinStorageNodesUp(options.minStorageNodesUp);
        database.setZooKeeperAddress(options.zooKeeperServerAddress);
        database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
        stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
        stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
        stateChangeHandler.reconfigureFromOptions(options);
        // Force a cluster state recomputation on the next cycle.
        stateChangeHandler.setStateChangedFlag();
        masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
        masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);

        if (rpcServer != null) {
            rpcServer.setMasterElectionHandler(masterElectionHandler);
            try{
                rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
            } catch (ListenFailedException e) {
                log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
            }
        }

        if (statusPageServer != null) {
            try{
                statusPageServer.setPort(options.httpPort);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            }
        }

        long currentTime = timer.getCurrentTimeInMillis();
        nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
        configGeneration = nextConfigGeneration;
        nextConfigGeneration = -1;
    }

    /** Renders the status page response for the given HTTP request; failures become an error page. */
    public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
        verifyInControllerThread();
        StatusPageResponse.ResponseCode responseCode;
        String message;
        String hiddenMessage = "";
        try {
            StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
            if (handler == null) {
                throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
            }
            return handler.handle(httpRequest);
        } catch (FileNotFoundException e) {
            responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
            message = e.getMessage();
        } catch (Exception e) {
            responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
            message = "Internal Server Error";
            // Stack trace is included in the page but hidden; see writeHtmlFooter.
            hiddenMessage = ExceptionUtils.getStackTrace(e);
            log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
        }

        TimeZone tz = TimeZone.getTimeZone("UTC");
        long currentTime = timer.getCurrentTimeInMillis();
        StatusPageResponse response = new StatusPageResponse();
        StringBuilder content = new StringBuilder();
        response.setContentType("text/html");
        response.setResponseCode(responseCode);
        content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
        content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
        response.writeHtmlHeader(content, message);
        response.writeHtmlFooter(content, hiddenMessage);
        response.writeContent(content.toString());
        return response;
    }

    /** One iteration of the controller's main processing loop. */
    public void tick() throws Exception {
        synchronized (monitor) {
            boolean didWork;
            didWork = database.doNextZooKeeperTask(databaseContext);
            didWork |= updateMasterElectionState();
            didWork |= handleLeadershipEdgeTransitions();
            stateChangeHandler.setMaster(isMaster);

            if ( ! isRunning()) { return; }
            didWork |= stateGatherer.processResponses(this);

            if ( ! isRunning()) { return; }
            if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
                didWork |= resyncLocallyCachedState();
            } else {
                stepDownAsStateGatherer();
            }

            if ( ! isRunning()) { return; }
            didWork |= systemStateBroadcaster.processResponses();
            if ( ! isRunning()) { return; }
            if (masterElectionHandler.isMaster()) {
                didWork |= broadcastClusterStateToEligibleNodes();
            }

            if ( ! isRunning()) { return; }
            didWork |= processAnyPendingStatusPageRequest();
            if ( ! isRunning()) { return; }
            if (rpcServer != null) {
                didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
            }

            if ( ! isRunning()) { return; }
            didWork |= processNextQueuedRemoteTask();

            processingCycle = false;
            ++cycleCount;
            long tickStopTime = timer.getCurrentTimeInMillis();
            if (tickStopTime >= tickStartTime) {
                metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
            }
            // Idle-wait only when nothing happened and no caller is blocked in waitForCompleteCycle().
            if ( ! didWork && ! waitingForCycle) {
                monitor.wait(options.cycleWaitTime);
            }
            if ( !
isRunning()) { return; }
            tickStartTime = timer.getCurrentTimeInMillis();
            processingCycle = true;
            // A config delivered via updateOptions() is applied at the top of the next cycle.
            if (nextOptions != null) { switchToNewConfig(); }
        }
        // Invoked outside the monitor lock; guarded by the listeners' own lock.
        if (isRunning()) { propagateNewStatesToListeners(); }
    }

    /**
     * Watches the ZooKeeper master election. Interruption is re-thrown with the original
     * cause attached; any other failure is logged as a warning and treated as "no work done".
     */
    private boolean updateMasterElectionState() throws InterruptedException {
        try {
            return masterElectionHandler.watchMasterElection(database, databaseContext);
        } catch (InterruptedException e) {
            throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString());
        }
        return false;
    }

    /** Clears locally gathered node states when this node stops being a state gatherer. */
    private void stepDownAsStateGatherer() {
        if (isStateGatherer) {
            cluster.clearStates();
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
        }
        isStateGatherer = false;
    }

    /** Swaps in the pending options and propagates them; failures are logged, not rethrown. */
    private void switchToNewConfig() {
        options = nextOptions;
        nextOptions = null;
        try {
            propagateOptions();
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
        }
    }

    /** Answers at most one pending HTTP status page request. Returns true if one was handled. */
    private boolean processAnyPendingStatusPageRequest() {
        if (statusPageServer != null) {
            StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
            if (statusRequest != null) {
                statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
                return true;
            }
        }
        return false;
    }

    /**
     * Broadcasts the newest cluster state to nodes if enough time has passed since the last
     * broadcast and either the initial grace period has expired or all nodes have reported.
     * Returns true if anything was sent.
     */
    private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
        boolean sentAny = false;
        long currentTime = timer.getCurrentTimeInMillis();
        if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
            && currentTime >= nextStateSendTime)
        {
            if (currentTime < firstAllowedStateBroadcast) {
                log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
                firstAllowedStateBroadcast = currentTime;
            }
            sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
            if (sentAny) {
                // Rate-limit subsequent broadcasts.
                nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
            }
        }
        return sentAny;
    }

    /** Delivers all queued cluster states to registered listeners, then clears the queue. */
    private void propagateNewStatesToListeners() {
        if ( ! newStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterState state : newStates) {
                    for(SystemStateListener listener : systemStateListeners) {
                        listener.handleNewSystemState(state);
                    }
                }
                newStates.clear();
            }
        }
    }

    /** Executes at most one queued remote cluster controller task. Returns true if one ran. */
    private boolean processNextQueuedRemoteTask() {
        if ( ! remoteTasks.isEmpty()) {
            final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
            final RemoteClusterControllerTask task = remoteTasks.poll();
            log.finest("Processing remote task " + task.getClass().getName());
            task.doRemoteFleetControllerTask(context);
            task.notifyCompleted();
            log.finest("Done processing remote task " + task.getClass().getName());
            return true;
        }
        return false;
    }

    /** Builds the context handed to remote tasks; exposes the consolidated (not just published) state. */
    private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
        final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
        context.cluster = cluster;
        context.currentState = consolidatedClusterState();
        context.masterInfo = masterElectionHandler;
        context.nodeStateOrHostInfoChangeHandler = this;
        context.nodeAddedOrRemovedListener = this;
        return context;
    }

    /**
     * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
     * up or down even when the whole cluster is down. The regular, published cluster state is not
     * normally updated to reflect node events when the cluster is down.
*/
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        // While the cluster is up, the published state is returned as-is.
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        // Otherwise return the latest candidate state, but keep the published version number.
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /*
      System test observations:
      - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
      - long time before content node state convergence (though this seems to be the case for legacy impl as well)
     */

    /**
     * Refreshes locally cached cluster info: wanted states/start timestamps from the database
     * (for non-masters, once every 100 cycles), slobrok lookup, node state gathering, timer
     * watching and, if required, recomputation of the cluster state.
     * Returns true if any work was done.
     */
    private boolean resyncLocallyCachedState() throws InterruptedException {
        boolean didWork = false;
        if ( ! isMaster && cycleCount % 100 == 0) {
            didWork = database.loadWantedStates(databaseContext);
            didWork |= database.loadStartTimestamps(cluster);
        }
        didWork |= nodeLookup.updateCluster(cluster, this);
        didWork |= stateGatherer.sendMessages(cluster, communicator, this);
        didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
        didWork |= recomputeClusterStateIfRequired();
        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            }
        }
        isStateGatherer = true;
        return didWork;
    }

    /**
     * Recomputes the candidate cluster state if anything may have changed, and promotes it to
     * a new versioned state when it differs enough from the current one or a new version was
     * observed in ZooKeeper. Returns true if a new versioned state was published.
     */
    private boolean recomputeClusterStateIfRequired() {
        if (mustRecomputeCandidateClusterState()) {
            stateChangeHandler.unsetStateChangedFlag();
            final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
            stateVersionTracker.updateLatestCandidateState(candidate);
            if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
            {
                final long timeNowMs = timer.getCurrentTimeInMillis();
                final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();

                stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
                emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
                handleNewSystemState(stateVersionTracker.getVersionedClusterState());
                return true;
            }
        }
        return false;
    }

    /** Generates an annotated cluster state from the current options, cluster info and time. */
    private AnnotatedClusterState computeCurrentAnnotatedState() {
        ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
        params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
              .cluster(cluster)
              .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
        return ClusterStateGenerator.generatedStateFrom(params);
    }

    /** Logs the delta events between the two annotated states, plus the state-applied summary events. */
    private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
                                                final AnnotatedClusterState toState,
                                                final long timeNowMs) {
        final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
                EventDiffCalculator.params()
                        .cluster(cluster)
                        .fromState(fromState)
                        .toState(toState)
                        .currentTimeMs(timeNowMs));
        for (Event event : deltaEvents) {
            eventLog.add(event, isMaster);
        }

        emitStateAppliedEvents(timeNowMs,
fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); } wantedStateChanged = false; isMaster = false; } return didWork; } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. 
Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. 
(Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Because System.exit is invoked on the very next line, control never reaches the finally block from that path — though whether the finally block would accomplish anything there is another question. I decided to leave it as it is: I am not sure exactly what happens once System.exit has been called, and since this is not a daemon thread it might even block exit(). So the code stays unchanged.
/**
 * Main loop of the fleet controller event thread.
 *
 * Records the current thread id (used by verifyInControllerThread-style checks elsewhere
 * in this class) and then keeps calling tick() until running is cleared. On any fatal,
 * unexpected error the process is terminated with exit code 1.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        // Spin the controller loop until shutdown() clears the running flag.
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException e) {
        // Expected shutdown path: interruption just stops the loop; cleanup happens in finally.
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // Unexpected fatal error: log it and terminate the whole process.
        t.printStackTrace();
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        // running is cleared here explicitly because System.exit(1) below means the
        // finally block is not expected to run for this path — and since this is a
        // non-daemon thread, reaching exit() without clearing the flag might block exit.
        synchronized (monitor) {
            running.set(false);
        }
        System.exit(1);
    } finally {
        // Normal-exit cleanup: mark the controller stopped and wake any waiters
        // (e.g. shutdown() and waitForCompleteCycle()) blocked on the monitor.
        running.set(false);
        synchronized (monitor) {
            monitor.notifyAll();
        }
    }
}
synchronized (monitor) { running.set(false); }
/**
 * Event-thread entry point: repeatedly ticks the controller until it is stopped,
 * cleaning up the running flag and notifying monitor waiters on the way out.
 * A fatal, unexpected throwable terminates the process with status 1.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException interruption) {
        // Interruption is the normal stop signal; the finally clause handles cleanup.
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + interruption);
    } catch (Throwable fatal) {
        // Anything else is unrecoverable: report it, clear the running flag, and exit.
        fatal.printStackTrace();
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", fatal);
        synchronized (monitor) {
            running.set(false);
        }
        System.exit(1);
    } finally {
        running.set(false);
        synchronized (monitor) {
            monitor.notifyAll();
        }
    }
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override 
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, 
timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); 
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); stateChangeHandler.handleAllDistributorsInSync( stateVersionTracker.getVersionedClusterState(), nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". 
This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: 
").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { 
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest("Processing remote task " + task.getClass().getName()); task.doRemoteFleetControllerTask(context); task.notifyCompleted(); log.finest("Done processing remote task " + task.getClass().getName()); return true; } return false; } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. 
*/ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); return true; } } return false; } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, 
fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); } wantedStateChanged = false; isMaster = false; } return didWork; } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. 
Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. 
(Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override 
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, 
timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); 
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); stateChangeHandler.handleAllDistributorsInSync( stateVersionTracker.getVersionedClusterState(), nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". 
This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: 
").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { 
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest("Processing remote task " + task.getClass().getName()); task.doRemoteFleetControllerTask(context); task.notifyCompleted(); log.finest("Done processing remote task " + task.getClass().getName()); return true; } return false; } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. 
*/ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); return true; } } return false; } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, 
fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); } wantedStateChanged = false; isMaster = false; } return didWork; } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. 
Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. 
(Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Good point, best to stay on the safe side.
public void run() {
    // Record the id of this event thread so verifyInControllerThread() can
    // assert that controller-internal callbacks happen on this thread only.
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        // Main event loop: keep executing controller cycles until the running
        // flag is cleared (by shutdown() or by the fatal-error path below).
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException e) {
        // An interrupt is treated as an expected stop signal, so log at debug only.
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // Any other throwable is fatal for the whole process.
        // NOTE(review): printStackTrace() duplicates the ERROR log line below,
        // which already records the throwable - consider dropping one of them.
        t.printStackTrace();
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        // Clear the running flag while holding the monitor so threads waiting
        // on it observe the stop before the process exits.
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        running.set(false);
        // Wake anyone blocked waiting on the monitor (e.g. waitForCompleteCycle()).
        synchronized (monitor) { monitor.notifyAll(); }
    }
}
synchronized (monitor) { running.set(false); }
public void run() {
    // Remember which thread runs the controller event loop; used by
    // verifyInControllerThread() to reject calls from foreign threads.
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        // Drive controller cycles until stopped; tick() performs one full cycle.
        while( isRunning() ) {
            tick();
        }
    } catch (InterruptedException e) {
        // Interruption is an expected shutdown signal - debug-level log only.
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // Anything else is fatal: log it and terminate the JVM.
        // NOTE(review): stderr stack trace is redundant with the ERROR log entry
        // below, which already carries the throwable - consider removing one.
        t.printStackTrace();
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        // Flip running to false under the monitor so waiters see the stop
        // before System.exit() tears the process down.
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        running.set(false);
        // Notify all monitor waiters that the event thread has terminated.
        synchronized (monitor) { monitor.notifyAll(); }
    }
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override 
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, 
timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); 
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); stateChangeHandler.handleAllDistributorsInSync( stateVersionTracker.getVersionedClusterState(), nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". 
This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: 
").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { 
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest("Processing remote task " + task.getClass().getName()); task.doRemoteFleetControllerTask(context); task.notifyCompleted(); log.finest("Done processing remote task " + task.getClass().getName()); return true; } return false; } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. 
*/ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); return true; } } return false; } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, 
fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); } wantedStateChanged = false; isMaster = false; } return didWork; } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. 
Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. 
(Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override 
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, 
timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); 
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); stateChangeHandler.handleAllDistributorsInSync( stateVersionTracker.getVersionedClusterState(), nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". 
This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: 
").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { 
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest("Processing remote task " + task.getClass().getName()); task.doRemoteFleetControllerTask(context); task.notifyCompleted(); log.finest("Done processing remote task " + task.getClass().getName()); return true; } return false; } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } /** * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. 
*/ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); return true; } } return false; } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, 
fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); } wantedStateChanged = false; isMaster = false; } return didWork; } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. 
Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else ++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. 
(Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Consider using `TemporalAmount` instead of an integer as timeout.
public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } }
Set<ApplicationInstanceReference> resultSet = new HashSet<>();
public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } }
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); @GuardedBy("threadsHoldingLock") private static final Map<Thread, ApplicationInstanceReference> threadsHoldingLock = new HashMap<>(); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; private final Curator curator; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; } @Override public ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference) { return new ReadOnlyStatusRegistry() { @Override public HostStatus getHostStatus(HostName hostName) { return getInternalHostStatus(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } }; } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. 
the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference applicationInstanceReference) { return lockApplicationInstance_forCurrentThreadOnly(applicationInstanceReference, 10); } @Override MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( ApplicationInstanceReference applicationInstanceReference, long timeoutSeconds) { Thread currentThread = Thread.currentThread(); assertThreadDoesNotHoldLock(currentThread,"Can't lock " + applicationInstanceReference); try { SessionFailRetryLoop sessionFailRetryLoop = curator.framework().getZookeeperClient().newSessionFailRetryLoop(Mode.FAIL); sessionFailRetryLoop.start(); try { String lockPath = applicationInstanceLockPath(applicationInstanceReference); InterProcessSemaphoreMutex mutex = acquireMutexOrThrow(timeoutSeconds, TimeUnit.SECONDS, lockPath); Lock lock2; try { String lock2Path = applicationInstanceLock2Path(applicationInstanceReference); lock2 = new Lock(lock2Path, curator); lock2.acquire(Duration.ofSeconds(timeoutSeconds)); } catch (Throwable t) { mutex.release(); throw t; } synchronized (threadsHoldingLock) { threadsHoldingLock.put(currentThread, applicationInstanceReference); } return new ZkMutableStatusRegistry( lock2, mutex, sessionFailRetryLoop, applicationInstanceReference, currentThread); } catch (Throwable t) { sessionFailRetryLoop.close(); throw t; } } catch (Exception e) { throw new RuntimeException(e); } } private void assertThreadDoesNotHoldLock(Thread currentThread, String message) { synchronized (threadsHoldingLock) { if (threadsHoldingLock.containsKey(currentThread)) { throw new AssertionError(message + ", already have a lock on " + threadsHoldingLock.get(currentThread)); } } } private InterProcessSemaphoreMutex acquireMutexOrThrow(long timeout, TimeUnit timeoutTimeUnit, String lockPath) throws Exception { InterProcessSemaphoreMutex mutex = new 
InterProcessSemaphoreMutex(curator.framework(), lockPath); log.log(LogLevel.DEBUG, "Waiting for lock on " + lockPath); boolean acquired = mutex.acquire(timeout, timeoutTimeUnit); if (!acquired) { log.log(LogLevel.DEBUG, "Timed out waiting for lock on " + lockPath); throw new TimeoutException("Timed out waiting for lock on " + lockPath); } log.log(LogLevel.DEBUG, "Successfully acquired lock on " + lockPath); return mutex; } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { assertThreadHoldsLock(applicationInstanceReference); String path = hostAllowedDownPath(applicationInstanceReference, hostName); try { switch (status) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path,"Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); } } catch (Exception e) { throw new RuntimeException(e); } } private static void assertThreadHoldsLock(ApplicationInstanceReference applicationInstanceReference) { synchronized (threadsHoldingLock) { ApplicationInstanceReference lockedApplicationInstanceReference = threadsHoldingLock.get(Thread.currentThread()); if (lockedApplicationInstanceReference == null) { throw new AssertionError("The current thread does not own any status service locks. 
" + "Application Instance = " + applicationInstanceReference); } if (!lockedApplicationInstanceReference.equals(applicationInstanceReference)) { throw new AssertionError("The current thread does not have a lock on " + "application instance " + applicationInstanceReference + ", but instead have a lock on " + lockedApplicationInstanceReference); } } } private void deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); } } private void createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); } } private HostStatus getInternalHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) { try { Stat statOrNull = curator.framework().checkExists().forPath( hostAllowedDownPath(applicationInstanceReference, hostName)); return (statOrNull == null) ? HostStatus.NO_REMARKS : HostStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } /** Common implementation for the two internal classes that sets ApplicationInstanceStatus. */ private ApplicationInstanceStatus getInternalApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private HostStatus getHostStatusWithLock( final ApplicationInstanceReference applicationInstanceReference, final HostName hostName) { assertThreadHoldsLock(applicationInstanceReference); return getInternalHostStatus(applicationInstanceReference, hostName); } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLockPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final InterProcessSemaphoreMutex mutex; private final SessionFailRetryLoop sessionFailRetryLoop; private final ApplicationInstanceReference applicationInstanceReference; private final Thread lockingThread; public ZkMutableStatusRegistry( Lock lock, 
InterProcessSemaphoreMutex mutex, SessionFailRetryLoop sessionFailRetryLoop, ApplicationInstanceReference applicationInstanceReference, Thread lockingThread) { this.mutex = mutex; this.lock = lock; this.sessionFailRetryLoop = sessionFailRetryLoop; this.applicationInstanceReference = applicationInstanceReference; this.lockingThread = lockingThread; } @Override public void setHostState(final HostName hostName, final HostStatus status) { setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { assertThreadHoldsLock(applicationInstanceReference); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public HostStatus getHostStatus(final HostName hostName) { return getHostStatusWithLock(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } @Override @NoThrow public void close() { synchronized (threadsHoldingLock) { threadsHoldingLock.remove(lockingThread, applicationInstanceReference); } try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } try { mutex.release(); } catch (Exception e) { if (e.getCause() instanceof SessionFailedException) { log.log(LogLevel.DEBUG, "Session expired, mutex should be freed automatically", e); } else { log.log(LogLevel.WARNING, "Failed unlocking 
application instance " + applicationInstanceReference, e); } } if (lockingThread != Thread.currentThread()) { throw new AssertionError("LockHandle should only be used from a single thread. " + "Application instance = " + applicationInstanceReference + " Locking thread = " + lockingThread + " Current thread = " + Thread.currentThread()); } try { sessionFailRetryLoop.close(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed closing SessionRetryLoop", e); } } } }
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); @GuardedBy("threadsHoldingLock") private static final Map<Thread, ApplicationInstanceReference> threadsHoldingLock = new HashMap<>(); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; private final Curator curator; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; } @Override public ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference) { return new ReadOnlyStatusRegistry() { @Override public HostStatus getHostStatus(HostName hostName) { return getInternalHostStatus(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } }; } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. 
the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference applicationInstanceReference) { return lockApplicationInstance_forCurrentThreadOnly(applicationInstanceReference, 10); } @Override MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( ApplicationInstanceReference applicationInstanceReference, long timeoutSeconds) { Thread currentThread = Thread.currentThread(); assertThreadDoesNotHoldLock(currentThread,"Can't lock " + applicationInstanceReference); try { SessionFailRetryLoop sessionFailRetryLoop = curator.framework().getZookeeperClient().newSessionFailRetryLoop(Mode.FAIL); sessionFailRetryLoop.start(); try { String lockPath = applicationInstanceLockPath(applicationInstanceReference); InterProcessSemaphoreMutex mutex = acquireMutexOrThrow(timeoutSeconds, TimeUnit.SECONDS, lockPath); Lock lock2; try { String lock2Path = applicationInstanceLock2Path(applicationInstanceReference); lock2 = new Lock(lock2Path, curator); lock2.acquire(Duration.ofSeconds(timeoutSeconds)); } catch (Throwable t) { mutex.release(); throw t; } synchronized (threadsHoldingLock) { threadsHoldingLock.put(currentThread, applicationInstanceReference); } return new ZkMutableStatusRegistry( lock2, mutex, sessionFailRetryLoop, applicationInstanceReference, currentThread); } catch (Throwable t) { sessionFailRetryLoop.close(); throw t; } } catch (Exception e) { throw new RuntimeException(e); } } private void assertThreadDoesNotHoldLock(Thread currentThread, String message) { synchronized (threadsHoldingLock) { if (threadsHoldingLock.containsKey(currentThread)) { throw new AssertionError(message + ", already have a lock on " + threadsHoldingLock.get(currentThread)); } } } private InterProcessSemaphoreMutex acquireMutexOrThrow(long timeout, TimeUnit timeoutTimeUnit, String lockPath) throws Exception { InterProcessSemaphoreMutex mutex = new 
InterProcessSemaphoreMutex(curator.framework(), lockPath); log.log(LogLevel.DEBUG, "Waiting for lock on " + lockPath); boolean acquired = mutex.acquire(timeout, timeoutTimeUnit); if (!acquired) { log.log(LogLevel.DEBUG, "Timed out waiting for lock on " + lockPath); throw new TimeoutException("Timed out waiting for lock on " + lockPath); } log.log(LogLevel.DEBUG, "Successfully acquired lock on " + lockPath); return mutex; } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { assertThreadHoldsLock(applicationInstanceReference); String path = hostAllowedDownPath(applicationInstanceReference, hostName); try { switch (status) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path,"Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); } } catch (Exception e) { throw new RuntimeException(e); } } private static void assertThreadHoldsLock(ApplicationInstanceReference applicationInstanceReference) { synchronized (threadsHoldingLock) { ApplicationInstanceReference lockedApplicationInstanceReference = threadsHoldingLock.get(Thread.currentThread()); if (lockedApplicationInstanceReference == null) { throw new AssertionError("The current thread does not own any status service locks. 
" + "Application Instance = " + applicationInstanceReference); } if (!lockedApplicationInstanceReference.equals(applicationInstanceReference)) { throw new AssertionError("The current thread does not have a lock on " + "application instance " + applicationInstanceReference + ", but instead have a lock on " + lockedApplicationInstanceReference); } } } private void deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); } } private void createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); } } private HostStatus getInternalHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) { try { Stat statOrNull = curator.framework().checkExists().forPath( hostAllowedDownPath(applicationInstanceReference, hostName)); return (statOrNull == null) ? HostStatus.NO_REMARKS : HostStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } /** Common implementation for the two internal classes that sets ApplicationInstanceStatus. */ private ApplicationInstanceStatus getInternalApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private HostStatus getHostStatusWithLock( final ApplicationInstanceReference applicationInstanceReference, final HostName hostName) { assertThreadHoldsLock(applicationInstanceReference); return getInternalHostStatus(applicationInstanceReference, hostName); } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLockPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final InterProcessSemaphoreMutex mutex; private final SessionFailRetryLoop sessionFailRetryLoop; private final ApplicationInstanceReference applicationInstanceReference; private final Thread lockingThread; public ZkMutableStatusRegistry( Lock lock, 
InterProcessSemaphoreMutex mutex, SessionFailRetryLoop sessionFailRetryLoop, ApplicationInstanceReference applicationInstanceReference, Thread lockingThread) { this.mutex = mutex; this.lock = lock; this.sessionFailRetryLoop = sessionFailRetryLoop; this.applicationInstanceReference = applicationInstanceReference; this.lockingThread = lockingThread; } @Override public void setHostState(final HostName hostName, final HostStatus status) { setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { assertThreadHoldsLock(applicationInstanceReference); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public HostStatus getHostStatus(final HostName hostName) { return getHostStatusWithLock(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } @Override @NoThrow public void close() { synchronized (threadsHoldingLock) { threadsHoldingLock.remove(lockingThread, applicationInstanceReference); } try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } try { mutex.release(); } catch (Exception e) { if (e.getCause() instanceof SessionFailedException) { log.log(LogLevel.DEBUG, "Session expired, mutex should be freed automatically", e); } else { log.log(LogLevel.WARNING, "Failed unlocking 
application instance " + applicationInstanceReference, e); } } if (lockingThread != Thread.currentThread()) { throw new AssertionError("LockHandle should only be used from a single thread. " + "Application instance = " + applicationInstanceReference + " Locking thread = " + lockingThread + " Current thread = " + Thread.currentThread()); } try { sessionFailRetryLoop.close(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed closing SessionRetryLoop", e); } } } }
This is part of the code to be removed - the new code uses Duration.
public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } }
Set<ApplicationInstanceReference> resultSet = new HashSet<>();
public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } }
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); @GuardedBy("threadsHoldingLock") private static final Map<Thread, ApplicationInstanceReference> threadsHoldingLock = new HashMap<>(); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; private final Curator curator; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; } @Override public ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference) { return new ReadOnlyStatusRegistry() { @Override public HostStatus getHostStatus(HostName hostName) { return getInternalHostStatus(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } }; } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. 
the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference applicationInstanceReference) { return lockApplicationInstance_forCurrentThreadOnly(applicationInstanceReference, 10); } @Override MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( ApplicationInstanceReference applicationInstanceReference, long timeoutSeconds) { Thread currentThread = Thread.currentThread(); assertThreadDoesNotHoldLock(currentThread,"Can't lock " + applicationInstanceReference); try { SessionFailRetryLoop sessionFailRetryLoop = curator.framework().getZookeeperClient().newSessionFailRetryLoop(Mode.FAIL); sessionFailRetryLoop.start(); try { String lockPath = applicationInstanceLockPath(applicationInstanceReference); InterProcessSemaphoreMutex mutex = acquireMutexOrThrow(timeoutSeconds, TimeUnit.SECONDS, lockPath); Lock lock2; try { String lock2Path = applicationInstanceLock2Path(applicationInstanceReference); lock2 = new Lock(lock2Path, curator); lock2.acquire(Duration.ofSeconds(timeoutSeconds)); } catch (Throwable t) { mutex.release(); throw t; } synchronized (threadsHoldingLock) { threadsHoldingLock.put(currentThread, applicationInstanceReference); } return new ZkMutableStatusRegistry( lock2, mutex, sessionFailRetryLoop, applicationInstanceReference, currentThread); } catch (Throwable t) { sessionFailRetryLoop.close(); throw t; } } catch (Exception e) { throw new RuntimeException(e); } } private void assertThreadDoesNotHoldLock(Thread currentThread, String message) { synchronized (threadsHoldingLock) { if (threadsHoldingLock.containsKey(currentThread)) { throw new AssertionError(message + ", already have a lock on " + threadsHoldingLock.get(currentThread)); } } } private InterProcessSemaphoreMutex acquireMutexOrThrow(long timeout, TimeUnit timeoutTimeUnit, String lockPath) throws Exception { InterProcessSemaphoreMutex mutex = new 
InterProcessSemaphoreMutex(curator.framework(), lockPath); log.log(LogLevel.DEBUG, "Waiting for lock on " + lockPath); boolean acquired = mutex.acquire(timeout, timeoutTimeUnit); if (!acquired) { log.log(LogLevel.DEBUG, "Timed out waiting for lock on " + lockPath); throw new TimeoutException("Timed out waiting for lock on " + lockPath); } log.log(LogLevel.DEBUG, "Successfully acquired lock on " + lockPath); return mutex; } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { assertThreadHoldsLock(applicationInstanceReference); String path = hostAllowedDownPath(applicationInstanceReference, hostName); try { switch (status) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path,"Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); } } catch (Exception e) { throw new RuntimeException(e); } } private static void assertThreadHoldsLock(ApplicationInstanceReference applicationInstanceReference) { synchronized (threadsHoldingLock) { ApplicationInstanceReference lockedApplicationInstanceReference = threadsHoldingLock.get(Thread.currentThread()); if (lockedApplicationInstanceReference == null) { throw new AssertionError("The current thread does not own any status service locks. 
" + "Application Instance = " + applicationInstanceReference); } if (!lockedApplicationInstanceReference.equals(applicationInstanceReference)) { throw new AssertionError("The current thread does not have a lock on " + "application instance " + applicationInstanceReference + ", but instead have a lock on " + lockedApplicationInstanceReference); } } } private void deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); } } private void createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); } } private HostStatus getInternalHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) { try { Stat statOrNull = curator.framework().checkExists().forPath( hostAllowedDownPath(applicationInstanceReference, hostName)); return (statOrNull == null) ? HostStatus.NO_REMARKS : HostStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } /** Common implementation for the two internal classes that sets ApplicationInstanceStatus. */ private ApplicationInstanceStatus getInternalApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private HostStatus getHostStatusWithLock( final ApplicationInstanceReference applicationInstanceReference, final HostName hostName) { assertThreadHoldsLock(applicationInstanceReference); return getInternalHostStatus(applicationInstanceReference, hostName); } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLockPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final InterProcessSemaphoreMutex mutex; private final SessionFailRetryLoop sessionFailRetryLoop; private final ApplicationInstanceReference applicationInstanceReference; private final Thread lockingThread; public ZkMutableStatusRegistry( Lock lock, 
InterProcessSemaphoreMutex mutex, SessionFailRetryLoop sessionFailRetryLoop, ApplicationInstanceReference applicationInstanceReference, Thread lockingThread) { this.mutex = mutex; this.lock = lock; this.sessionFailRetryLoop = sessionFailRetryLoop; this.applicationInstanceReference = applicationInstanceReference; this.lockingThread = lockingThread; } @Override public void setHostState(final HostName hostName, final HostStatus status) { setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { assertThreadHoldsLock(applicationInstanceReference); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public HostStatus getHostStatus(final HostName hostName) { return getHostStatusWithLock(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } @Override @NoThrow public void close() { synchronized (threadsHoldingLock) { threadsHoldingLock.remove(lockingThread, applicationInstanceReference); } try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } try { mutex.release(); } catch (Exception e) { if (e.getCause() instanceof SessionFailedException) { log.log(LogLevel.DEBUG, "Session expired, mutex should be freed automatically", e); } else { log.log(LogLevel.WARNING, "Failed unlocking 
application instance " + applicationInstanceReference, e); } } if (lockingThread != Thread.currentThread()) { throw new AssertionError("LockHandle should only be used from a single thread. " + "Application instance = " + applicationInstanceReference + " Locking thread = " + lockingThread + " Current thread = " + Thread.currentThread()); } try { sessionFailRetryLoop.close(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed closing SessionRetryLoop", e); } } } }
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); @GuardedBy("threadsHoldingLock") private static final Map<Thread, ApplicationInstanceReference> threadsHoldingLock = new HashMap<>(); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; private final Curator curator; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; } @Override public ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference) { return new ReadOnlyStatusRegistry() { @Override public HostStatus getHostStatus(HostName hostName) { return getInternalHostStatus(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } }; } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. 
the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference applicationInstanceReference) { return lockApplicationInstance_forCurrentThreadOnly(applicationInstanceReference, 10); } @Override MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( ApplicationInstanceReference applicationInstanceReference, long timeoutSeconds) { Thread currentThread = Thread.currentThread(); assertThreadDoesNotHoldLock(currentThread,"Can't lock " + applicationInstanceReference); try { SessionFailRetryLoop sessionFailRetryLoop = curator.framework().getZookeeperClient().newSessionFailRetryLoop(Mode.FAIL); sessionFailRetryLoop.start(); try { String lockPath = applicationInstanceLockPath(applicationInstanceReference); InterProcessSemaphoreMutex mutex = acquireMutexOrThrow(timeoutSeconds, TimeUnit.SECONDS, lockPath); Lock lock2; try { String lock2Path = applicationInstanceLock2Path(applicationInstanceReference); lock2 = new Lock(lock2Path, curator); lock2.acquire(Duration.ofSeconds(timeoutSeconds)); } catch (Throwable t) { mutex.release(); throw t; } synchronized (threadsHoldingLock) { threadsHoldingLock.put(currentThread, applicationInstanceReference); } return new ZkMutableStatusRegistry( lock2, mutex, sessionFailRetryLoop, applicationInstanceReference, currentThread); } catch (Throwable t) { sessionFailRetryLoop.close(); throw t; } } catch (Exception e) { throw new RuntimeException(e); } } private void assertThreadDoesNotHoldLock(Thread currentThread, String message) { synchronized (threadsHoldingLock) { if (threadsHoldingLock.containsKey(currentThread)) { throw new AssertionError(message + ", already have a lock on " + threadsHoldingLock.get(currentThread)); } } } private InterProcessSemaphoreMutex acquireMutexOrThrow(long timeout, TimeUnit timeoutTimeUnit, String lockPath) throws Exception { InterProcessSemaphoreMutex mutex = new 
InterProcessSemaphoreMutex(curator.framework(), lockPath); log.log(LogLevel.DEBUG, "Waiting for lock on " + lockPath); boolean acquired = mutex.acquire(timeout, timeoutTimeUnit); if (!acquired) { log.log(LogLevel.DEBUG, "Timed out waiting for lock on " + lockPath); throw new TimeoutException("Timed out waiting for lock on " + lockPath); } log.log(LogLevel.DEBUG, "Successfully acquired lock on " + lockPath); return mutex; } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { assertThreadHoldsLock(applicationInstanceReference); String path = hostAllowedDownPath(applicationInstanceReference, hostName); try { switch (status) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path,"Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); } } catch (Exception e) { throw new RuntimeException(e); } } private static void assertThreadHoldsLock(ApplicationInstanceReference applicationInstanceReference) { synchronized (threadsHoldingLock) { ApplicationInstanceReference lockedApplicationInstanceReference = threadsHoldingLock.get(Thread.currentThread()); if (lockedApplicationInstanceReference == null) { throw new AssertionError("The current thread does not own any status service locks. 
" + "Application Instance = " + applicationInstanceReference); } if (!lockedApplicationInstanceReference.equals(applicationInstanceReference)) { throw new AssertionError("The current thread does not have a lock on " + "application instance " + applicationInstanceReference + ", but instead have a lock on " + lockedApplicationInstanceReference); } } } private void deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); } } private void createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); } } private HostStatus getInternalHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) { try { Stat statOrNull = curator.framework().checkExists().forPath( hostAllowedDownPath(applicationInstanceReference, hostName)); return (statOrNull == null) ? HostStatus.NO_REMARKS : HostStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } /** Common implementation for the two internal classes that sets ApplicationInstanceStatus. */ private ApplicationInstanceStatus getInternalApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private HostStatus getHostStatusWithLock( final ApplicationInstanceReference applicationInstanceReference, final HostName hostName) { assertThreadHoldsLock(applicationInstanceReference); return getInternalHostStatus(applicationInstanceReference, hostName); } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLockPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final InterProcessSemaphoreMutex mutex; private final SessionFailRetryLoop sessionFailRetryLoop; private final ApplicationInstanceReference applicationInstanceReference; private final Thread lockingThread; public ZkMutableStatusRegistry( Lock lock, 
InterProcessSemaphoreMutex mutex, SessionFailRetryLoop sessionFailRetryLoop, ApplicationInstanceReference applicationInstanceReference, Thread lockingThread) { this.mutex = mutex; this.lock = lock; this.sessionFailRetryLoop = sessionFailRetryLoop; this.applicationInstanceReference = applicationInstanceReference; this.lockingThread = lockingThread; } @Override public void setHostState(final HostName hostName, final HostStatus status) { setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { assertThreadHoldsLock(applicationInstanceReference); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public HostStatus getHostStatus(final HostName hostName) { return getHostStatusWithLock(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } @Override @NoThrow public void close() { synchronized (threadsHoldingLock) { threadsHoldingLock.remove(lockingThread, applicationInstanceReference); } try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } try { mutex.release(); } catch (Exception e) { if (e.getCause() instanceof SessionFailedException) { log.log(LogLevel.DEBUG, "Session expired, mutex should be freed automatically", e); } else { log.log(LogLevel.WARNING, "Failed unlocking 
application instance " + applicationInstanceReference, e); } } if (lockingThread != Thread.currentThread()) { throw new AssertionError("LockHandle should only be used from a single thread. " + "Application instance = " + applicationInstanceReference + " Locking thread = " + lockingThread + " Current thread = " + Thread.currentThread()); } try { sessionFailRetryLoop.close(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed closing SessionRetryLoop", e); } } } }
:+1:
/**
 * Returns the references of all application instances that currently have a suspended
 * node under {@code APPLICATION_STATUS_BASE_PATH} in ZooKeeper.
 *
 * @return the (possibly empty) set of suspended application instance references
 * @throws RuntimeException wrapping any ZooKeeper/Curator failure
 */
public Set<ApplicationInstanceReference> getAllSuspendedApplications() {
    try {
        Set<ApplicationInstanceReference> suspended = new HashSet<>();
        // If the base node was never created, no application has ever been suspended.
        Stat baseNode = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH);
        if (baseNode == null) {
            return suspended;
        }
        // Each child node name encodes one suspended application instance reference.
        for (String child : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) {
            suspended.add(OrchestratorUtil.parseAppInstanceReference(child));
        }
        return suspended;
    } catch (Exception e) {
        log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e);
        throw new RuntimeException(e);
    }
}
Set<ApplicationInstanceReference> resultSet = new HashSet<>();
/**
 * Returns the references of all application instances that currently have a suspended
 * node under {@code APPLICATION_STATUS_BASE_PATH} in ZooKeeper.
 *
 * @return the (possibly empty) set of suspended application instance references
 * @throws RuntimeException wrapping any ZooKeeper/Curator failure
 */
public Set<ApplicationInstanceReference> getAllSuspendedApplications() {
    try {
        Set<ApplicationInstanceReference> resultSet = new HashSet<>();
        // If the base node was never created, no application has ever been suspended.
        Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH);
        if (stat == null) return resultSet;
        // Each child node name encodes one suspended application instance reference.
        for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) {
            ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr);
            resultSet.add(appRef);
        }
        return resultSet;
    } catch (Exception e) {
        log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e);
        throw new RuntimeException(e);
    }
}
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); @GuardedBy("threadsHoldingLock") private static final Map<Thread, ApplicationInstanceReference> threadsHoldingLock = new HashMap<>(); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; private final Curator curator; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; } @Override public ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference) { return new ReadOnlyStatusRegistry() { @Override public HostStatus getHostStatus(HostName hostName) { return getInternalHostStatus(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } }; } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. 
the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference applicationInstanceReference) { return lockApplicationInstance_forCurrentThreadOnly(applicationInstanceReference, 10); } @Override MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( ApplicationInstanceReference applicationInstanceReference, long timeoutSeconds) { Thread currentThread = Thread.currentThread(); assertThreadDoesNotHoldLock(currentThread,"Can't lock " + applicationInstanceReference); try { SessionFailRetryLoop sessionFailRetryLoop = curator.framework().getZookeeperClient().newSessionFailRetryLoop(Mode.FAIL); sessionFailRetryLoop.start(); try { String lockPath = applicationInstanceLockPath(applicationInstanceReference); InterProcessSemaphoreMutex mutex = acquireMutexOrThrow(timeoutSeconds, TimeUnit.SECONDS, lockPath); Lock lock2; try { String lock2Path = applicationInstanceLock2Path(applicationInstanceReference); lock2 = new Lock(lock2Path, curator); lock2.acquire(Duration.ofSeconds(timeoutSeconds)); } catch (Throwable t) { mutex.release(); throw t; } synchronized (threadsHoldingLock) { threadsHoldingLock.put(currentThread, applicationInstanceReference); } return new ZkMutableStatusRegistry( lock2, mutex, sessionFailRetryLoop, applicationInstanceReference, currentThread); } catch (Throwable t) { sessionFailRetryLoop.close(); throw t; } } catch (Exception e) { throw new RuntimeException(e); } } private void assertThreadDoesNotHoldLock(Thread currentThread, String message) { synchronized (threadsHoldingLock) { if (threadsHoldingLock.containsKey(currentThread)) { throw new AssertionError(message + ", already have a lock on " + threadsHoldingLock.get(currentThread)); } } } private InterProcessSemaphoreMutex acquireMutexOrThrow(long timeout, TimeUnit timeoutTimeUnit, String lockPath) throws Exception { InterProcessSemaphoreMutex mutex = new 
InterProcessSemaphoreMutex(curator.framework(), lockPath); log.log(LogLevel.DEBUG, "Waiting for lock on " + lockPath); boolean acquired = mutex.acquire(timeout, timeoutTimeUnit); if (!acquired) { log.log(LogLevel.DEBUG, "Timed out waiting for lock on " + lockPath); throw new TimeoutException("Timed out waiting for lock on " + lockPath); } log.log(LogLevel.DEBUG, "Successfully acquired lock on " + lockPath); return mutex; } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { assertThreadHoldsLock(applicationInstanceReference); String path = hostAllowedDownPath(applicationInstanceReference, hostName); try { switch (status) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path,"Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); } } catch (Exception e) { throw new RuntimeException(e); } } private static void assertThreadHoldsLock(ApplicationInstanceReference applicationInstanceReference) { synchronized (threadsHoldingLock) { ApplicationInstanceReference lockedApplicationInstanceReference = threadsHoldingLock.get(Thread.currentThread()); if (lockedApplicationInstanceReference == null) { throw new AssertionError("The current thread does not own any status service locks. 
" + "Application Instance = " + applicationInstanceReference); } if (!lockedApplicationInstanceReference.equals(applicationInstanceReference)) { throw new AssertionError("The current thread does not have a lock on " + "application instance " + applicationInstanceReference + ", but instead have a lock on " + lockedApplicationInstanceReference); } } } private void deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); } } private void createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); } } private HostStatus getInternalHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) { try { Stat statOrNull = curator.framework().checkExists().forPath( hostAllowedDownPath(applicationInstanceReference, hostName)); return (statOrNull == null) ? HostStatus.NO_REMARKS : HostStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } /** Common implementation for the two internal classes that sets ApplicationInstanceStatus. */ private ApplicationInstanceStatus getInternalApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private HostStatus getHostStatusWithLock( final ApplicationInstanceReference applicationInstanceReference, final HostName hostName) { assertThreadHoldsLock(applicationInstanceReference); return getInternalHostStatus(applicationInstanceReference, hostName); } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLockPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final InterProcessSemaphoreMutex mutex; private final SessionFailRetryLoop sessionFailRetryLoop; private final ApplicationInstanceReference applicationInstanceReference; private final Thread lockingThread; public ZkMutableStatusRegistry( Lock lock, 
InterProcessSemaphoreMutex mutex, SessionFailRetryLoop sessionFailRetryLoop, ApplicationInstanceReference applicationInstanceReference, Thread lockingThread) { this.mutex = mutex; this.lock = lock; this.sessionFailRetryLoop = sessionFailRetryLoop; this.applicationInstanceReference = applicationInstanceReference; this.lockingThread = lockingThread; } @Override public void setHostState(final HostName hostName, final HostStatus status) { setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { assertThreadHoldsLock(applicationInstanceReference); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public HostStatus getHostStatus(final HostName hostName) { return getHostStatusWithLock(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } @Override @NoThrow public void close() { synchronized (threadsHoldingLock) { threadsHoldingLock.remove(lockingThread, applicationInstanceReference); } try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } try { mutex.release(); } catch (Exception e) { if (e.getCause() instanceof SessionFailedException) { log.log(LogLevel.DEBUG, "Session expired, mutex should be freed automatically", e); } else { log.log(LogLevel.WARNING, "Failed unlocking 
application instance " + applicationInstanceReference, e); } } if (lockingThread != Thread.currentThread()) { throw new AssertionError("LockHandle should only be used from a single thread. " + "Application instance = " + applicationInstanceReference + " Locking thread = " + lockingThread + " Current thread = " + Thread.currentThread()); } try { sessionFailRetryLoop.close(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed closing SessionRetryLoop", e); } } } }
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); @GuardedBy("threadsHoldingLock") private static final Map<Thread, ApplicationInstanceReference> threadsHoldingLock = new HashMap<>(); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; private final Curator curator; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; } @Override public ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference) { return new ReadOnlyStatusRegistry() { @Override public HostStatus getHostStatus(HostName hostName) { return getInternalHostStatus(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } }; } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. 
the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference applicationInstanceReference) { return lockApplicationInstance_forCurrentThreadOnly(applicationInstanceReference, 10); } @Override MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( ApplicationInstanceReference applicationInstanceReference, long timeoutSeconds) { Thread currentThread = Thread.currentThread(); assertThreadDoesNotHoldLock(currentThread,"Can't lock " + applicationInstanceReference); try { SessionFailRetryLoop sessionFailRetryLoop = curator.framework().getZookeeperClient().newSessionFailRetryLoop(Mode.FAIL); sessionFailRetryLoop.start(); try { String lockPath = applicationInstanceLockPath(applicationInstanceReference); InterProcessSemaphoreMutex mutex = acquireMutexOrThrow(timeoutSeconds, TimeUnit.SECONDS, lockPath); Lock lock2; try { String lock2Path = applicationInstanceLock2Path(applicationInstanceReference); lock2 = new Lock(lock2Path, curator); lock2.acquire(Duration.ofSeconds(timeoutSeconds)); } catch (Throwable t) { mutex.release(); throw t; } synchronized (threadsHoldingLock) { threadsHoldingLock.put(currentThread, applicationInstanceReference); } return new ZkMutableStatusRegistry( lock2, mutex, sessionFailRetryLoop, applicationInstanceReference, currentThread); } catch (Throwable t) { sessionFailRetryLoop.close(); throw t; } } catch (Exception e) { throw new RuntimeException(e); } } private void assertThreadDoesNotHoldLock(Thread currentThread, String message) { synchronized (threadsHoldingLock) { if (threadsHoldingLock.containsKey(currentThread)) { throw new AssertionError(message + ", already have a lock on " + threadsHoldingLock.get(currentThread)); } } } private InterProcessSemaphoreMutex acquireMutexOrThrow(long timeout, TimeUnit timeoutTimeUnit, String lockPath) throws Exception { InterProcessSemaphoreMutex mutex = new 
InterProcessSemaphoreMutex(curator.framework(), lockPath); log.log(LogLevel.DEBUG, "Waiting for lock on " + lockPath); boolean acquired = mutex.acquire(timeout, timeoutTimeUnit); if (!acquired) { log.log(LogLevel.DEBUG, "Timed out waiting for lock on " + lockPath); throw new TimeoutException("Timed out waiting for lock on " + lockPath); } log.log(LogLevel.DEBUG, "Successfully acquired lock on " + lockPath); return mutex; } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { assertThreadHoldsLock(applicationInstanceReference); String path = hostAllowedDownPath(applicationInstanceReference, hostName); try { switch (status) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path,"Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); } } catch (Exception e) { throw new RuntimeException(e); } } private static void assertThreadHoldsLock(ApplicationInstanceReference applicationInstanceReference) { synchronized (threadsHoldingLock) { ApplicationInstanceReference lockedApplicationInstanceReference = threadsHoldingLock.get(Thread.currentThread()); if (lockedApplicationInstanceReference == null) { throw new AssertionError("The current thread does not own any status service locks. 
" + "Application Instance = " + applicationInstanceReference); } if (!lockedApplicationInstanceReference.equals(applicationInstanceReference)) { throw new AssertionError("The current thread does not have a lock on " + "application instance " + applicationInstanceReference + ", but instead have a lock on " + lockedApplicationInstanceReference); } } } private void deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); } } private void createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); } } private HostStatus getInternalHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) { try { Stat statOrNull = curator.framework().checkExists().forPath( hostAllowedDownPath(applicationInstanceReference, hostName)); return (statOrNull == null) ? HostStatus.NO_REMARKS : HostStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } /** Common implementation for the two internal classes that sets ApplicationInstanceStatus. */ private ApplicationInstanceStatus getInternalApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private HostStatus getHostStatusWithLock( final ApplicationInstanceReference applicationInstanceReference, final HostName hostName) { assertThreadHoldsLock(applicationInstanceReference); return getInternalHostStatus(applicationInstanceReference, hostName); } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLockPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final InterProcessSemaphoreMutex mutex; private final SessionFailRetryLoop sessionFailRetryLoop; private final ApplicationInstanceReference applicationInstanceReference; private final Thread lockingThread; public ZkMutableStatusRegistry( Lock lock, 
InterProcessSemaphoreMutex mutex, SessionFailRetryLoop sessionFailRetryLoop, ApplicationInstanceReference applicationInstanceReference, Thread lockingThread) { this.mutex = mutex; this.lock = lock; this.sessionFailRetryLoop = sessionFailRetryLoop; this.applicationInstanceReference = applicationInstanceReference; this.lockingThread = lockingThread; } @Override public void setHostState(final HostName hostName, final HostStatus status) { setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { assertThreadHoldsLock(applicationInstanceReference); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public HostStatus getHostStatus(final HostName hostName) { return getHostStatusWithLock(applicationInstanceReference, hostName); } @Override public ApplicationInstanceStatus getApplicationInstanceStatus() { return getInternalApplicationInstanceStatus(applicationInstanceReference); } @Override @NoThrow public void close() { synchronized (threadsHoldingLock) { threadsHoldingLock.remove(lockingThread, applicationInstanceReference); } try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } try { mutex.release(); } catch (Exception e) { if (e.getCause() instanceof SessionFailedException) { log.log(LogLevel.DEBUG, "Session expired, mutex should be freed automatically", e); } else { log.log(LogLevel.WARNING, "Failed unlocking 
application instance " + applicationInstanceReference, e); } } if (lockingThread != Thread.currentThread()) { throw new AssertionError("LockHandle should only be used from a single thread. " + "Application instance = " + applicationInstanceReference + " Locking thread = " + lockingThread + " Current thread = " + Thread.currentThread()); } try { sessionFailRetryLoop.close(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed closing SessionRetryLoop", e); } } } }
"Canocical" -> "Canonical"
/**
 * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
 * This method finds that one service endpoint and strips the URI part that
 * the routingGenerator is wrapping around the endpoint.
 *
 * @param deploymentId the deployment to retrieve the global service endpoint for
 * @return empty if no global endpoint exists, otherwise the service endpoint
 *         ([clustername.]app.tenant.region.env)
 * @throws IOException if an endpoint from the routing generator has no host part,
 *                     or is not a syntactically valid URI
 */
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
    Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
    Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
        try {
            URI uri = new URI(endpoint.getEndpoint());
            String serviceEndpoint = uri.getHost();
            if (serviceEndpoint == null) {
                throw new IOException("Unexpected endpoints returned from the Routing Generator");
            }
            // Strip the wrapper suffix literally. Note: replace(), not replaceAll() —
            // replaceAll() interprets its first argument as a regex, so the unescaped
            // dots would match any character.
            String canonicalEndpoint = serviceEndpoint.replace(".vespa.yahooapis.com", "");
            String hostname = endpoint.getHostname();
            if (hostname != null) {
                // A host is resolved once we have seen both its global endpoint and its
                // canonical (non-global) endpoint; the canonical one is returned.
                if (endpoint.isGlobal()) {
                    hostToGlobalEndpoint.put(hostname, endpoint);
                } else {
                    hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                }
                if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                    return Optional.of(hostToCanonicalEndpoint.get(hostname));
                }
            }
        } catch (URISyntaxException use) {
            throw new IOException(use);
        }
    }
    return Optional.empty();
}
Map<String, String> hostToCanocicalEndpoint = new HashMap<>();
/**
 * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
 * This method finds that one service endpoint and strips the URI part that
 * the routingGenerator is wrapping around the endpoint.
 *
 * @param deploymentId the deployment to retrieve the global service endpoint for
 * @return empty if no global endpoint exists, otherwise the service endpoint
 *         ([clustername.]app.tenant.region.env)
 * @throws IOException if an endpoint from the routing generator has no host part,
 *                     or is not a syntactically valid URI
 */
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
    Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
    Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
        try {
            URI uri = new URI(endpoint.getEndpoint());
            String serviceEndpoint = uri.getHost();
            if (serviceEndpoint == null) {
                throw new IOException("Unexpected endpoints returned from the Routing Generator");
            }
            // Strip the wrapper suffix literally. Note: replace(), not replaceAll() —
            // replaceAll() interprets its first argument as a regex, so the unescaped
            // dots would match any character.
            String canonicalEndpoint = serviceEndpoint.replace(".vespa.yahooapis.com", "");
            String hostname = endpoint.getHostname();
            if (hostname != null) {
                // A host is resolved once we have seen both its global endpoint and its
                // canonical (non-global) endpoint; the canonical one is returned.
                if (endpoint.isGlobal()) {
                    hostToGlobalEndpoint.put(hostname, endpoint);
                } else {
                    hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                }
                if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                    return Optional.of(hostToCanonicalEndpoint.get(hostname));
                }
            }
        } catch (URISyntaxException use) {
            throw new IOException(use);
        }
    }
    return Optional.empty();
}
/**
 * Manages the applications known to the controller: creation, deletion, deployment,
 * deactivation, and global-rotation bookkeeping. All mutating operations acquire the
 * per-application curator lock via {@link #lock(ApplicationId)} before reading, modifying
 * and storing an application.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final RotationRepository rotationRepository;
    private final ZmsClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          RotationRepository rotationRepository,
                          ZmsClientFactory zmsClientFactory,
                          NameService nameService, ConfigServerClient configserverClient,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.rotationRepository = rotationRepository;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);

        // Rewrite all stored applications at startup, under lock, so they are
        // re-serialized in the current format. Applications deleted concurrently
        // (absent on re-read) are skipped.
        for (Application application : db.listApplications()) {
            try (Lock lock = lock(application.id())) {
                Optional<Application> optionalApplication = db.getApplication(application.id());
                if ( ! optionalApplication.isPresent()) continue;
                store(optionalApplication.get(), lock);
            }
        }
    }

    /** Returns the application with the given id, or null if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * NOTE(review): this javadoc documents getCanonicalGlobalEndpoint, which is defined
     * elsewhere in this file; it is stranded in front of createApplication.
     *
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr")))
            throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment");
        try (Lock lock = lock(id)) {
            if (get(id).isPresent())
                throw new IllegalArgumentException("An application with id '" + id + "' already exists");
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
            if ( ! tenant.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Also guard against a name clash after dash-to-underscore normalization
            if (get(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            if (tenant.get().isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            if (tenant.get().isAthensTenant()) {
                ZmsClient zmsClient = zmsClientFactory.createClientWithAuthorizedServiceToken(token.get());
                // Delete any pre-existing ZMS registration before re-adding; a failure to
                // delete (e.g. not present) is deliberately ignored.
                try {
                    zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
                                                new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
                }
                catch (ZmsException ignored) {
                }
                zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                         new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            Application application = new Application(id);
            store(application, lock);
            log.info("Created " + application);
            return application;
        }
    }

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage, DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            Application application = get(applicationId).orElse(new Application(applicationId));

            // Decide which Vespa version to deploy
            Version version;
            if (options.deployCurrentVersion)
                version = application.currentVersion(controller, zone);
            else if (application.deploymentJobs().isSelfTriggering())
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed())
                return unexpectedDeployment(applicationId, zone, applicationPackage);
            else
                version = application.currentDeployVersion(controller, zone);

            // Refuse to deploy an untested pending change to a controlled zone
            if ( ! application.deploymentJobs().isSelfTriggering() && ! zone.environment().isManuallyDeployed() &&
                 ! application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
                throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                   " as pending " + application.deploying().get() + " is untested");

            DeploymentJobs.JobType jobType = DeploymentJobs.JobType.from(controller.zoneRegistry().system(), zone);
            ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);

            if ( ! options.deployCurrentVersion) {
                // Update application state from the new application package
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null)
                    application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value());
                if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange)
                    application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision)));
                if ( ! triggeredWith(revision, application, jobType) && !zone.environment().isManuallyDeployed() && jobType != null) {
                    // Record the triggering for jobs the application does not trigger itself
                    application = application.with(application.deploymentJobs().withTriggering(jobType, version, Optional.of(revision), clock.instant()));
                }
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application, lock); // store missing information even if we fail deployment below
            }

            // Carry out the deployment: prepare on the config server, then activate
            DeploymentId deploymentId = new DeploymentId(applicationId, zone);
            ApplicationRotation rotationInDns = registerRotationInDns(deploymentId, getOrAssignRotation(deploymentId, applicationPackage));
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                configserverClient.prepare(deploymentId, options, rotationInDns.cnames(), rotationInDns.rotations(), applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.with(new Deployment(zone, revision, version, clock.instant()));
            store(application, lock);
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse());
        }
    }

    /** Builds a no-op ActivateResult whose log explains that the deployment was ignored */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone + " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse);
    }

    /**
     * Deactivates prod deployments no longer listed in deployment.xml, provided the
     * deployment-removal validation override allows it; otherwise throws.
     */
    private Application deleteRemovedDeployments(Application application) {
        List<Deployment> deploymentsToRemove = application.deployments().values().stream()
                .filter(deployment -> deployment.zone().environment() == Environment.prod)
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                       .map(deployment -> deployment.zone().region().value())
                                                       .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        Application applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment, false);
        return applicationWithRemoval;
    }

    /** Removes production job statuses for zones no longer in the deployment spec */
    private Application deleteUnreferencedDeploymentJobs(Application application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<Zone> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(Zone::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /** Returns whether the given job was last triggered with the given revision */
    private boolean triggeredWith(ApplicationRevision revision, Application application, DeploymentJobs.JobType jobType) {
        if (jobType == null) return false;
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return false;
        if ( ! status.lastTriggered().isPresent()) return false;
        JobStatus.JobRun triggered = status.lastTriggered().get();
        if ( ! triggered.revision().isPresent()) return false;
        return triggered.revision().get().equals(revision);
    }

    /** Returns a copy of the given options with the resolved Vespa version set */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /** Builds a revision from the package hash, plus source info when the build job provides it */
    private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> screwDriverBuildJob) {
        if ( ! screwDriverBuildJob.isPresent())
            return ApplicationRevision.from(applicationPackage.hash());
        GitRevision gitRevision = screwDriverBuildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationRevision.from(applicationPackage.hash());
        return ApplicationRevision.from(applicationPackage.hash(),
                                        new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id()));
    }

    /**
     * Ensures a CNAME record exists mapping the application alias to its (first) rotation.
     * DNS registration failures are logged, not propagated.
     */
    private ApplicationRotation registerRotationInDns(DeploymentId deploymentId, ApplicationRotation applicationRotation) {
        ApplicationAlias alias = new ApplicationAlias(deploymentId.applicationId());
        if (applicationRotation.rotations().isEmpty()) return applicationRotation;
        Rotation rotation = applicationRotation.rotations().iterator().next();
        String endpointName = alias.toString();
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, endpointName);
            if (!record.isPresent()) {
                RecordId recordId = nameService.createCname(endpointName, rotation.rotationName);
                log.info("Registered mapping with record ID " + recordId.id() + ": " + endpointName + " -> " + rotation.rotationName);
            }
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
        return new ApplicationRotation(Collections.singleton(endpointName), Collections.singleton(rotation));
    }

    /** Assigns rotations only for prod deployments; other environments get an empty rotation */
    private ApplicationRotation getOrAssignRotation(DeploymentId deploymentId, ApplicationPackage applicationPackage) {
        if (deploymentId.zone().environment().equals(Environment.prod)) {
            return new ApplicationRotation(Collections.emptySet(),
                                           rotationRepository.getOrAssignRotation(deploymentId.applicationId(), applicationPackage.deploymentSpec()));
        } else {
            return new ApplicationRotation(Collections.emptySet(), Collections.emptySet());
        }
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId, e);
            return Optional.empty();
        }
    }

    /**
     * Deletes the application with this id
     *
     * @return the deleted application, or null if it did not exist
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public Application deleteApplication(ApplicationId id, Optional<NToken> token) {
        try (Lock lock = lock(id)) {
            Optional<Application> application = get(id);
            if ( ! application.isPresent()) return null;
            if ( ! application.get().deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            if (tenant.isAthensTenant())
                zmsClientFactory.createClientWithAuthorizedServiceToken(token.get())
                        .deleteApplication(tenant.getAthensDomain().get(),
                                           new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            db.deleteApplication(id);
            log.info("Deleted " + application.get());
            return application.get();
        }
    }

    /** Stores the given Jira issue id on the application, if the application exists */
    public void setJiraIssueId(ApplicationId id, Optional<String> jiraIssueId) {
        try (Lock lock = lock(id)) {
            get(id).ifPresent(application -> store(application.withJiraIssueId(jiraIssueId), lock));
        }
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application the application version to store
     * @param lock the lock held on this application since before modification started
     */
    @SuppressWarnings("unused")
    public void store(Application application, Lock lock) {
        db.store(application);
    }

    /** Forwards a build-job completion report to the deployment trigger, unless the application is unknown */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /** Restarts all nodes of the deployment */
    public void restart(DeploymentId deploymentId) {
        try {
            configserverClient.restart(deploymentId, Optional.empty());
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Restarts a single host of the deployment */
    public void restartHost(DeploymentId deploymentId, Hostname hostname) {
        try {
            configserverClient.restart(deploymentId, Optional.of(hostname));
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public Application deactivate(Application application, Zone zone) {
        return deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public Application deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        return deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    private Application deactivate(Application application, Zone zone, Optional<Deployment> deployment,
                                   boolean requireThatDeploymentHasExpired) {
        try (Lock lock = lock(application.id())) {
            // When expiry is required, keep deployments that have not yet expired
            if (deployment.isPresent() && requireThatDeploymentHasExpired &&
                ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) {
                return application;
            }
            // Already gone on the config server is fine; proceed to remove our record
            try {
                configserverClient.deactivate(new DeploymentId(application.id(), zone));
            } catch (NoInstanceException ignored) {
            }
            application = application.withoutDeploymentIn(zone);
            store(application, lock);
            return application;
        }
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with dashes in the application name normalized to underscores */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application needs to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    public Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Immutable pairing of the CNAMEs and rotations assigned to a deployment */
    private static final class ApplicationRotation {

        private final ImmutableSet<String> cnames;
        private final ImmutableSet<Rotation> rotations;

        public ApplicationRotation(Set<String> cnames, Set<Rotation> rotations) {
            this.cnames = ImmutableSet.copyOf(cnames);
            this.rotations = ImmutableSet.copyOf(rotations);
        }

        public Set<String> cnames() { return cnames; }

        public Set<Rotation> rotations() { return rotations; }

    }

}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final RotationRepository rotationRepository; private final ZmsClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, RotationRepository rotationRepository, ZmsClientFactory zmsClientFactory, NameService nameService, ConfigServerClient configserverClient, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.rotationRepository = rotationRepository; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { try (Lock lock = lock(application.id())) { Optional<Application> optionalApplication = db.getApplication(application.id()); if ( ! 
optionalApplication.isPresent()) continue; store(optionalApplication.get(), lock); } } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as 
we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. * * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! (id.instance().value().equals("default") || id.instance().value().startsWith("default-pr"))) throw new UnsupportedOperationException("Only the instance names 'default' and names starting with 'default-pr' are supported at the moment"); try (Lock lock = lock(id)) { if (get(id).isPresent()) throw new IllegalArgumentException("An application with id '" + id + "' already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (tenant.get().isAthensTenant() && ! 
token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); if (tenant.get().isAthensTenant()) { ZmsClient zmsClient = zmsClientFactory.createClientWithAuthorizedServiceToken(token.get()); try { zmsClient.deleteApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } catch (ZmsException ignored) { } zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } Application application = new Application(id); store(application, lock); log.info("Created " + application); return application; } } /** Deploys an application. If the application does not exist it is created. */ public ActivateResult deployApplication(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage, DeployOptions options) { try (Lock lock = lock(applicationId)) { Application application = get(applicationId).orElse(new Application(applicationId)); Version version; if (options.deployCurrentVersion) version = application.currentVersion(controller, zone); else if (application.deploymentJobs().isSelfTriggering()) version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed()) return unexpectedDeployment(applicationId, zone, applicationPackage); else version = application.currentDeployVersion(controller, zone); if ( ! application.deploymentJobs().isSelfTriggering() && ! zone.environment().isManuallyDeployed() && ! 
application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying())) throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as pending " + application.deploying().get() + " is untested"); DeploymentJobs.JobType jobType = DeploymentJobs.JobType.from(controller.zoneRegistry().system(), zone); ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); if ( ! options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); if (options.screwdriverBuildJob.isPresent() && options.screwdriverBuildJob.get().screwdriverId != null) application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value()); if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange) application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision))); if ( ! 
triggeredWith(revision, application, jobType) && !zone.environment().isManuallyDeployed() && jobType != null) { application = application.with(application.deploymentJobs().withTriggering(jobType, version, Optional.of(revision), clock.instant())); } application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application, lock); } DeploymentId deploymentId = new DeploymentId(applicationId, zone); ApplicationRotation rotationInDns = registerRotationInDns(deploymentId, getOrAssignRotation(deploymentId, applicationPackage)); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(deploymentId, options, rotationInDns.cnames(), rotationInDns.rotations(), applicationPackage.zippedContent()); preparedApplication.activate(); application = application.with(new Deployment(zone, revision, version, clock.instant())); store(application, lock); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse()); } } private ActivateResult unexpectedDeployment(ApplicationId applicationId, Zone zone, ApplicationPackage applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + get(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.hash()), prepareResponse); } private Application deleteRemovedDeployments(Application application) { List<Deployment> deploymentsToRemove = application.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.prod) 
.filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); Application applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment, false); return applicationWithRemoval; } private Application deleteUnreferencedDeploymentJobs(Application application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<Zone> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(Zone::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } private boolean triggeredWith(ApplicationRevision revision, Application application, DeploymentJobs.JobType jobType) { if (jobType == null) return false; JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; if ( ! status.lastTriggered().isPresent()) return false; JobStatus.JobRun triggered = status.lastTriggered().get(); if ( ! 
triggered.revision().isPresent()) return false; return triggered.revision().get().equals(revision); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationRevision toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> screwDriverBuildJob) { if ( ! screwDriverBuildJob.isPresent()) return ApplicationRevision.from(applicationPackage.hash()); GitRevision gitRevision = screwDriverBuildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationRevision.from(applicationPackage.hash()); return ApplicationRevision.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } private ApplicationRotation registerRotationInDns(DeploymentId deploymentId, ApplicationRotation applicationRotation) { ApplicationAlias alias = new ApplicationAlias(deploymentId.applicationId()); if (applicationRotation.rotations().isEmpty()) return applicationRotation; Rotation rotation = applicationRotation.rotations().iterator().next(); String endpointName = alias.toString(); try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, endpointName); if (!record.isPresent()) { RecordId recordId = nameService.createCname(endpointName, rotation.rotationName); log.info("Registered mapping with record ID " + recordId.id() + ": " + endpointName + " -> " + rotation.rotationName); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } return new ApplicationRotation(Collections.singleton(endpointName), Collections.singleton(rotation)); } private ApplicationRotation getOrAssignRotation(DeploymentId deploymentId, ApplicationPackage applicationPackage) { if 
(deploymentId.zone().environment().equals(Environment.prod)) { return new ApplicationRotation(Collections.emptySet(), rotationRepository.getOrAssignRotation(deploymentId.applicationId(), applicationPackage.deploymentSpec())); } else { return new ApplicationRotation(Collections.emptySet(), Collections.emptySet()); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId, e); return Optional.empty(); } } /** * Deletes the application with this id * * @return the deleted application, or null if it did not exist * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public Application deleteApplication(ApplicationId id, Optional<NToken> token) { try (Lock lock = lock(id)) { Optional<Application> application = get(id); if ( ! application.isPresent()) return null; if ( ! application.get().deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! 
token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (tenant.isAthensTenant()) zmsClientFactory.createClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); db.deleteApplication(id); log.info("Deleted " + application.get()); return application.get(); } } public void setJiraIssueId(ApplicationId id, Optional<String> jiraIssueId) { try (Lock lock = lock(id)) { get(id).ifPresent(application -> store(application.withJiraIssueId(jiraIssueId), lock)); } } /** * Replace any previous version of this application by this instance * * @param application the application version to store * @param lock the lock held on this application since before modification started */ @SuppressWarnings("unused") public void store(Application application, Lock lock) { db.store(application); } public void notifyJobCompletion(JobReport report) { if ( ! 
get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } public void restart(DeploymentId deploymentId) { try { configserverClient.restart(deploymentId, Optional.empty()); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } public void restartHost(DeploymentId deploymentId, Hostname hostname) { try { configserverClient.restart(deploymentId, Optional.of(hostname)); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public Application deactivate(Application application, Zone zone) { return deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public Application deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { return deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private Application deactivate(Application application, Zone zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { try (Lock lock = lock(application.id())) { if (deployment.isPresent() && requireThatDeploymentHasExpired && ! 
DeploymentExpirer.hasExpired( controller.zoneRegistry(), deployment.get(), clock.instant())) { return application; } try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } application = application.withoutDeploymentIn(zone); store(application, lock); return application; } } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ public Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } private static final class ApplicationRotation { private final ImmutableSet<String> cnames; private final ImmutableSet<Rotation> rotations; public ApplicationRotation(Set<String> cnames, Set<Rotation> rotations) { this.cnames = ImmutableSet.copyOf(cnames); this.rotations = ImmutableSet.copyOf(rotations); } public Set<String> cnames() { return cnames; } public Set<Rotation> rotations() { return rotations; } } }
I suggest we replace with "... always empty since the config id used here is not the config id of the container, which is the config producer for this config"
private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(findMyIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); }
b.myidFile("var/controller-zookeeper/myid");
private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static Integer findMyIndex(ClusterInfoConfig clusterInfo) { try { String hostname = InetAddress.getLocalHost().getHostName(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } catch (UnknownHostException e) { throw new UncheckedIOException(e); } } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) 
.collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Version readSystemVersion() { Optional<byte[]> data = curator.getData(systemVersionPath()); if (! data.isPresent() || data.get().length == 0) return Vtag.currentVersion; return Version.fromString(new String(data.get(), StandardCharsets.UTF_8)); } public void writeSystemVersion(Version version) { NestedTransaction transaction = new NestedTransaction(); curator.set(systemVersionPath(), version.toString().getBytes(StandardCharsets.UTF_8)); transaction.commit(); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return 
lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Version readSystemVersion() { Optional<byte[]> data = curator.getData(systemVersionPath()); if (! data.isPresent() || data.get().length == 0) return Vtag.currentVersion; return Version.fromString(new String(data.get(), StandardCharsets.UTF_8)); } public void writeSystemVersion(Version version) { NestedTransaction transaction = new NestedTransaction(); curator.set(systemVersionPath(), version.toString().getBytes(StandardCharsets.UTF_8)); transaction.commit(); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! 
data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } }
Actually not empty but null, which seems to be a bug to me, but I suggest we just remove both the ClusterMemberInfoConfig and the comment ...
private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(findMyIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); }
b.myidFile("var/controller-zookeeper/myid");
private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static Integer findMyIndex(ClusterInfoConfig clusterInfo) { try { String hostname = InetAddress.getLocalHost().getHostName(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } catch (UnknownHostException e) { throw new UncheckedIOException(e); } } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) 
.collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Version readSystemVersion() { Optional<byte[]> data = curator.getData(systemVersionPath()); if (! data.isPresent() || data.get().length == 0) return Vtag.currentVersion; return Version.fromString(new String(data.get(), StandardCharsets.UTF_8)); } public void writeSystemVersion(Version version) { NestedTransaction transaction = new NestedTransaction(); curator.set(systemVersionPath(), version.toString().getBytes(StandardCharsets.UTF_8)); transaction.commit(); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return 
lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Version readSystemVersion() { Optional<byte[]> data = curator.getData(systemVersionPath()); if (! data.isPresent() || data.get().length == 0) return Vtag.currentVersion; return Version.fromString(new String(data.get(), StandardCharsets.UTF_8)); } public void writeSystemVersion(Version version) { NestedTransaction transaction = new NestedTransaction(); curator.set(systemVersionPath(), version.toString().getBytes(StandardCharsets.UTF_8)); transaction.commit(); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! 
data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } }
> Or, exclude above those upgrading to BROKEN version? What does that even mean?
// Files or updates deployment-failure issues for the given applications: applications
// failing since before maxFailureAge get an issue; all others get their stored
// JIRA issue id cleared.
private void maintainDeploymentIssues(List<Application> applications) {
    Collection<Application> failingApplications = new ArrayList<>();
    for (Application application : applications)
        if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge)))
            failingApplications.add(application);
        else
            controller().applications().setJiraIssueId(application.id(), Optional.empty());

    // When more than 20% of all applications are failing, assume a platform-wide
    // problem and file a single aggregate issue instead of one per application.
    if (failingApplications.size() > 0.2 * applications.size()) {
        fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications));
    }
    else {
        for (Application application : failingApplications) {
            Issue deploymentIssue = deploymentIssueFrom(application);
            Tenant applicationTenant = null;
            Classification applicationOwner = null;
            try {
                applicationTenant= ownerOf(application);
                applicationOwner = jiraClassificationOf(applicationTenant);
                fileFor(application, deploymentIssue.with(applicationOwner));
            } catch (RuntimeException e) {
                // Fallback when JIRA rejects the OpsDb-derived component name.
                Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL);
                // NOTE(review): if ownerOf or jiraClassificationOf threw above, applicationOwner
                // and/or applicationTenant are still null here, so both branches below can NPE —
                // confirm and guard.
                // NOTE(review): the opsdb URL literal below appears truncated (text after
                // "https:" is missing, leaving an unbalanced string) — restore from version control.
                if (componentError.matcher(e.getMessage()).matches())
                    fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling."));
                else
                    fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner));
            }
        }
    }
}
// Files or updates deployment-failure issues for the given applications: applications
// failing since before maxFailureAge get an issue; all others get their stored
// JIRA issue id cleared.
private void maintainDeploymentIssues(List<Application> applications) {
    Collection<Application> failingApplications = new ArrayList<>();
    for (Application application : applications)
        if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge)))
            failingApplications.add(application);
        else
            controller().applications().setJiraIssueId(application.id(), Optional.empty());

    // When more than 20% of all applications are failing, assume a platform-wide
    // problem and file a single aggregate issue instead of one per application.
    if (failingApplications.size() > 0.2 * applications.size()) {
        fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications));
    }
    else {
        for (Application application : failingApplications) {
            Issue deploymentIssue = deploymentIssueFrom(application);
            Tenant applicationTenant = null;
            Classification applicationOwner = null;
            try {
                applicationTenant= ownerOf(application);
                applicationOwner = jiraClassificationOf(applicationTenant);
                fileFor(application, deploymentIssue.with(applicationOwner));
            } catch (RuntimeException e) {
                // Fallback when JIRA rejects the OpsDb-derived component name.
                Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL);
                // NOTE(review): if ownerOf or jiraClassificationOf threw above, applicationOwner
                // and/or applicationTenant are still null here, so both branches below can NPE —
                // confirm and guard.
                // NOTE(review): the opsdb URL literal below appears truncated (text after
                // "https:" is missing, leaving an unbalanced string) — restore from version control.
                if (componentError.matcher(e.getMessage()).matches())
                    fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling."));
                else
                    fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner));
            }
        }
    }
}
// Maintainer which files, updates and escalates JIRA issues for applications whose
// deployments have been failing for too long.
class DeploymentIssueReporter extends Maintainer {

    // Deployments failing for longer than this get an issue filed.
    static final Duration maxFailureAge = Duration.ofDays(2);
    // Issues untouched for longer than this are escalated.
    static final Duration maxInactivityAge = Duration.ofDays(4);
    static final String deploymentFailureLabel = "vespaDeploymentFailure";
    // Default classification used when no application owner can be determined.
    static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel);
    // Assignee of last resort once the escalation chain is exhausted.
    static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin);

    private final Contacts contacts;
    private final Properties properties;
    private final Issues issues;

    DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) {
        super(controller, maintenanceInterval, jobControl);
        this.contacts = contacts;
        this.properties = properties;
        this.issues = issues;
    }

    @Override
    protected void maintain() {
        maintainDeploymentIssues(controller().applications().asList());
        escalateInactiveDeploymentIssues(controller().applications().asList());
    }

    /**
     * File issues for applications which have failed deployment for longer than @maxFailureAge
     * and store the issue id for the filed issues. Also, clear the @issueIds of applications
     * where deployment has not failed for this amount of time.
     */
    // NOTE(review): the javadoc above has no method under it in this chunk — the
    // maintainDeploymentIssues body it documents appears elsewhere; confirm placement.

    /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */
    private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) {
        return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold);
    }

    // Looks up the owning tenant of the given application.
    // NOTE(review): Optional.get() without a presence check — throws if the tenant is gone.
    private Tenant ownerOf(Application application) {
        return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get();
    }

    /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. */
    private Classification jiraClassificationOf(Tenant tenant) {
        Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant));
        Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId));
        return classification.withLabel(deploymentFailureLabel);
    }

    /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */
    private void fileFor(Application application, Issue issue) {
        // An existing issue only counts if it is still open.
        Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId()
                .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done);
        if ( ! ourIssueId.isPresent())
            controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue)));
    }

    /** File @issue, or update a JIRA issue representing the same issue. */
    private void fileOrUpdate(Issue issue) {
        Optional<String> jiraIssueId = issues.fetchSimilarTo(issue)
                .stream().findFirst().map(Issues.IssueInfo::id);
        if (jiraIssueId.isPresent())
            issues.update(jiraIssueId.get(), issue.description());
        else
            issues.file(issue);
    }

    /** Escalate JIRA issues for which there has been no activity for a set amount of time. */
    private void escalateInactiveDeploymentIssues(List<Application> applications) {
        applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> {
            Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId);
            if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge)))
                escalateAndComment(issueInfo, application);
        }));
    }

    /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. */
    private void escalateAndComment(IssueInfo issueInfo, Application application) {
        Optional<String> assignee = issueInfo.assignee();
        if (assignee.isPresent()) {
            // Escalation stops at the terminal user; prior assignees stay on as watchers.
            if (assignee.get().equals(terminalUser.username()))
                return;
            issues.addWatcher(issueInfo.id(), assignee.get());
        }
        // NOTE(review): getPropertyId().get() throws when the tenant has no property id — confirm.
        Long propertyId = ownerOf(application).getPropertyId().get().value();
        UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one"));
        // If OpsDb hands back the current assignee, the chain is exhausted.
        if (escalationTarget.is(assignee.orElse("no one")))
            escalationTarget = terminalUser;
        String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone"));
        issues.comment(issueInfo.id(), comment);
        issues.reassign(issueInfo.id(), escalationTarget.username());
    }

    /** Returns the per-application deployment-failure issue, classified under vespaOps. */
    Issue deploymentIssueFrom(Application application) {
        return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application))
                .with(vespaOps);
    }

    // Aggregate issue used when a large fraction of all deployments are failing.
    Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) {
        return new Issue(
                "More than 20% of Hosted Vespa deployments are failing",
                applications.stream()
                        .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]")
                        .collect(Collectors.joining("\n")),
                vespaOps);
    }

    private static String toShortString(ApplicationId id) {
        return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." + id.instance().value() );
    }

    private String toUrl(ApplicationId applicationId) {
        return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString();
    }

    private String toOpsDbUrl(long propertyId) {
        return contacts.contactsUri(propertyId).toString();
    }

    /** Returns the summary text that will be assigned to a new issue */
    private static String deploymentIssueSummary(Application application) {
        return "[" + toShortString(application.id()) + "] Action required: Repair deployment";
    }

    /** Returns the description text that will be assigned to a new issue */
    // NOTE(review): the "[yo/vespa-support|http:" literal below appears truncated
    // (URL text after "http:" is missing) — restore from version control.
    private String deploymentIssueDescription(Application application) {
        return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it.";
    }

    /** Returns the comment text that will be added to an issue each time it is escalated */
    private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) {
        return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. " + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company.";
    }
}
// Maintainer which files, updates and escalates JIRA issues for applications whose
// deployments have been failing for too long.
class DeploymentIssueReporter extends Maintainer {

    // Deployments failing for longer than this get an issue filed.
    static final Duration maxFailureAge = Duration.ofDays(2);
    // Issues untouched for longer than this are escalated.
    static final Duration maxInactivityAge = Duration.ofDays(4);
    static final String deploymentFailureLabel = "vespaDeploymentFailure";
    // Default classification used when no application owner can be determined.
    static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel);
    // Assignee of last resort once the escalation chain is exhausted.
    static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin);

    private final Contacts contacts;
    private final Properties properties;
    private final Issues issues;

    DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) {
        super(controller, maintenanceInterval, jobControl);
        this.contacts = contacts;
        this.properties = properties;
        this.issues = issues;
    }

    @Override
    protected void maintain() {
        maintainDeploymentIssues(controller().applications().asList());
        escalateInactiveDeploymentIssues(controller().applications().asList());
    }

    /**
     * File issues for applications which have failed deployment for longer than @maxFailureAge
     * and store the issue id for the filed issues. Also, clear the @issueIds of applications
     * where deployment has not failed for this amount of time.
     */
    // NOTE(review): the javadoc above has no method under it in this chunk — the
    // maintainDeploymentIssues body it documents appears elsewhere; confirm placement.

    /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */
    private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) {
        return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold);
    }

    // Looks up the owning tenant of the given application.
    // NOTE(review): Optional.get() without a presence check — throws if the tenant is gone.
    private Tenant ownerOf(Application application) {
        return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get();
    }

    /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. */
    private Classification jiraClassificationOf(Tenant tenant) {
        Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant));
        Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId));
        return classification.withLabel(deploymentFailureLabel);
    }

    /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */
    private void fileFor(Application application, Issue issue) {
        // An existing issue only counts if it is still open.
        Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId()
                .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done);
        if ( ! ourIssueId.isPresent())
            controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue)));
    }

    /** File @issue, or update a JIRA issue representing the same issue. */
    private void fileOrUpdate(Issue issue) {
        Optional<String> jiraIssueId = issues.fetchSimilarTo(issue)
                .stream().findFirst().map(Issues.IssueInfo::id);
        if (jiraIssueId.isPresent())
            issues.update(jiraIssueId.get(), issue.description());
        else
            issues.file(issue);
    }

    /** Escalate JIRA issues for which there has been no activity for a set amount of time. */
    private void escalateInactiveDeploymentIssues(List<Application> applications) {
        applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> {
            Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId);
            if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge)))
                escalateAndComment(issueInfo, application);
        }));
    }

    /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. */
    private void escalateAndComment(IssueInfo issueInfo, Application application) {
        Optional<String> assignee = issueInfo.assignee();
        if (assignee.isPresent()) {
            // Escalation stops at the terminal user; prior assignees stay on as watchers.
            if (assignee.get().equals(terminalUser.username()))
                return;
            issues.addWatcher(issueInfo.id(), assignee.get());
        }
        // NOTE(review): getPropertyId().get() throws when the tenant has no property id — confirm.
        Long propertyId = ownerOf(application).getPropertyId().get().value();
        UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one"));
        // If OpsDb hands back the current assignee, the chain is exhausted.
        if (escalationTarget.is(assignee.orElse("no one")))
            escalationTarget = terminalUser;
        String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone"));
        issues.comment(issueInfo.id(), comment);
        issues.reassign(issueInfo.id(), escalationTarget.username());
    }

    /** Returns the per-application deployment-failure issue, classified under vespaOps. */
    Issue deploymentIssueFrom(Application application) {
        return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application))
                .with(vespaOps);
    }

    // Aggregate issue used when a large fraction of all deployments are failing.
    Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) {
        return new Issue(
                "More than 20% of Hosted Vespa deployments are failing",
                applications.stream()
                        .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]")
                        .collect(Collectors.joining("\n")),
                vespaOps);
    }

    private static String toShortString(ApplicationId id) {
        return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." + id.instance().value() );
    }

    private String toUrl(ApplicationId applicationId) {
        return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString();
    }

    private String toOpsDbUrl(long propertyId) {
        return contacts.contactsUri(propertyId).toString();
    }

    /** Returns the summary text that will be assigned to a new issue */
    private static String deploymentIssueSummary(Application application) {
        return "[" + toShortString(application.id()) + "] Action required: Repair deployment";
    }

    /** Returns the description text that will be assigned to a new issue */
    // NOTE(review): the "[yo/vespa-support|http:" literal below appears truncated
    // (URL text after "http:" is missing) — restore from version control.
    private String deploymentIssueDescription(Application application) {
        return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it.";
    }

    /** Returns the comment text that will be added to an issue each time it is escalated */
    private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) {
        return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. " + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company.";
    }
}
The loop above picks out the failing applications. We should leave out those that are upgrading to a BROKEN version. I guess a pair of commas would have helped (although it would still be awkward).
// Files or updates deployment-failure issues for the given applications: applications
// failing since before maxFailureAge get an issue; all others get their stored
// JIRA issue id cleared.
private void maintainDeploymentIssues(List<Application> applications) {
    Collection<Application> failingApplications = new ArrayList<>();
    for (Application application : applications)
        if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge)))
            failingApplications.add(application);
        else
            controller().applications().setJiraIssueId(application.id(), Optional.empty());

    // When more than 20% of all applications are failing, assume a platform-wide
    // problem and file a single aggregate issue instead of one per application.
    if (failingApplications.size() > 0.2 * applications.size()) {
        fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications));
    }
    else {
        for (Application application : failingApplications) {
            Issue deploymentIssue = deploymentIssueFrom(application);
            Tenant applicationTenant = null;
            Classification applicationOwner = null;
            try {
                applicationTenant= ownerOf(application);
                applicationOwner = jiraClassificationOf(applicationTenant);
                fileFor(application, deploymentIssue.with(applicationOwner));
            } catch (RuntimeException e) {
                // Fallback when JIRA rejects the OpsDb-derived component name.
                Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL);
                // NOTE(review): if ownerOf or jiraClassificationOf threw above, applicationOwner
                // and/or applicationTenant are still null here, so both branches below can NPE —
                // confirm and guard.
                // NOTE(review): the opsdb URL literal below appears truncated (text after
                // "https:" is missing, leaving an unbalanced string) — restore from version control.
                if (componentError.matcher(e.getMessage()).matches())
                    fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling."));
                else
                    fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner));
            }
        }
    }
}
// Files or updates deployment-failure issues for the given applications: applications
// failing since before maxFailureAge get an issue; all others get their stored
// JIRA issue id cleared.
private void maintainDeploymentIssues(List<Application> applications) {
    Collection<Application> failingApplications = new ArrayList<>();
    for (Application application : applications)
        if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge)))
            failingApplications.add(application);
        else
            controller().applications().setJiraIssueId(application.id(), Optional.empty());

    // When more than 20% of all applications are failing, assume a platform-wide
    // problem and file a single aggregate issue instead of one per application.
    if (failingApplications.size() > 0.2 * applications.size()) {
        fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications));
    }
    else {
        for (Application application : failingApplications) {
            Issue deploymentIssue = deploymentIssueFrom(application);
            Tenant applicationTenant = null;
            Classification applicationOwner = null;
            try {
                applicationTenant= ownerOf(application);
                applicationOwner = jiraClassificationOf(applicationTenant);
                fileFor(application, deploymentIssue.with(applicationOwner));
            } catch (RuntimeException e) {
                // Fallback when JIRA rejects the OpsDb-derived component name.
                Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL);
                // NOTE(review): if ownerOf or jiraClassificationOf threw above, applicationOwner
                // and/or applicationTenant are still null here, so both branches below can NPE —
                // confirm and guard.
                // NOTE(review): the opsdb URL literal below appears truncated (text after
                // "https:" is missing, leaving an unbalanced string) — restore from version control.
                if (componentError.matcher(e.getMessage()).matches())
                    fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling."));
                else
                    fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner));
            }
        }
    }
}
// Maintainer which files, updates and escalates JIRA issues for applications whose
// deployments have been failing for too long.
class DeploymentIssueReporter extends Maintainer {

    // Deployments failing for longer than this get an issue filed.
    static final Duration maxFailureAge = Duration.ofDays(2);
    // Issues untouched for longer than this are escalated.
    static final Duration maxInactivityAge = Duration.ofDays(4);
    static final String deploymentFailureLabel = "vespaDeploymentFailure";
    // Default classification used when no application owner can be determined.
    static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel);
    // Assignee of last resort once the escalation chain is exhausted.
    static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin);

    private final Contacts contacts;
    private final Properties properties;
    private final Issues issues;

    DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) {
        super(controller, maintenanceInterval, jobControl);
        this.contacts = contacts;
        this.properties = properties;
        this.issues = issues;
    }

    @Override
    protected void maintain() {
        maintainDeploymentIssues(controller().applications().asList());
        escalateInactiveDeploymentIssues(controller().applications().asList());
    }

    /**
     * File issues for applications which have failed deployment for longer than @maxFailureAge
     * and store the issue id for the filed issues. Also, clear the @issueIds of applications
     * where deployment has not failed for this amount of time.
     */
    // NOTE(review): the javadoc above has no method under it in this chunk — the
    // maintainDeploymentIssues body it documents appears elsewhere; confirm placement.

    /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */
    private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) {
        return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold);
    }

    // Looks up the owning tenant of the given application.
    // NOTE(review): Optional.get() without a presence check — throws if the tenant is gone.
    private Tenant ownerOf(Application application) {
        return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get();
    }

    /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. */
    private Classification jiraClassificationOf(Tenant tenant) {
        Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant));
        Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId));
        return classification.withLabel(deploymentFailureLabel);
    }

    /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */
    private void fileFor(Application application, Issue issue) {
        // An existing issue only counts if it is still open.
        Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId()
                .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done);
        if ( ! ourIssueId.isPresent())
            controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue)));
    }

    /** File @issue, or update a JIRA issue representing the same issue. */
    private void fileOrUpdate(Issue issue) {
        Optional<String> jiraIssueId = issues.fetchSimilarTo(issue)
                .stream().findFirst().map(Issues.IssueInfo::id);
        if (jiraIssueId.isPresent())
            issues.update(jiraIssueId.get(), issue.description());
        else
            issues.file(issue);
    }

    /** Escalate JIRA issues for which there has been no activity for a set amount of time. */
    private void escalateInactiveDeploymentIssues(List<Application> applications) {
        applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> {
            Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId);
            if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge)))
                escalateAndComment(issueInfo, application);
        }));
    }

    /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. */
    private void escalateAndComment(IssueInfo issueInfo, Application application) {
        Optional<String> assignee = issueInfo.assignee();
        if (assignee.isPresent()) {
            // Escalation stops at the terminal user; prior assignees stay on as watchers.
            if (assignee.get().equals(terminalUser.username()))
                return;
            issues.addWatcher(issueInfo.id(), assignee.get());
        }
        // NOTE(review): getPropertyId().get() throws when the tenant has no property id — confirm.
        Long propertyId = ownerOf(application).getPropertyId().get().value();
        UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one"));
        // If OpsDb hands back the current assignee, the chain is exhausted.
        if (escalationTarget.is(assignee.orElse("no one")))
            escalationTarget = terminalUser;
        String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone"));
        issues.comment(issueInfo.id(), comment);
        issues.reassign(issueInfo.id(), escalationTarget.username());
    }

    /** Returns the per-application deployment-failure issue, classified under vespaOps. */
    Issue deploymentIssueFrom(Application application) {
        return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application))
                .with(vespaOps);
    }

    // Aggregate issue used when a large fraction of all deployments are failing.
    Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) {
        return new Issue(
                "More than 20% of Hosted Vespa deployments are failing",
                applications.stream()
                        .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]")
                        .collect(Collectors.joining("\n")),
                vespaOps);
    }

    private static String toShortString(ApplicationId id) {
        return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." + id.instance().value() );
    }

    private String toUrl(ApplicationId applicationId) {
        return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString();
    }

    private String toOpsDbUrl(long propertyId) {
        return contacts.contactsUri(propertyId).toString();
    }

    /** Returns the summary text that will be assigned to a new issue */
    private static String deploymentIssueSummary(Application application) {
        return "[" + toShortString(application.id()) + "] Action required: Repair deployment";
    }

    /** Returns the description text that will be assigned to a new issue */
    // NOTE(review): the "[yo/vespa-support|http:" literal below appears truncated
    // (URL text after "http:" is missing) — restore from version control.
    private String deploymentIssueDescription(Application application) {
        return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it.";
    }

    /** Returns the comment text that will be added to an issue each time it is escalated */
    private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) {
        return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. " + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company.";
    }
}
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
I was too quick with this. Obviously you cannot write this error message here.
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
.append("\n\nNote: The 'Queue Component' field in [opsdb|https:
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
.. nor the comment above.
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
.append("\n\nNote: The 'Queue Component' field in [opsdb|https:
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
Ok.
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
Fixed.
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } synchronized (classLocking) { classLocking.notifyAll(); } signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.info(objectToString() + ": Releasing lock"); if (classLock != null) { classLock.close(); } log.info(objectToString() + ": Stop complete"); }
if (classLock != null) {
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private ClassLock classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = classLocking.tryLock(NodeAdminStateUpdater.class, () -> !terminated.get()); } catch (LockInterruptException e) { return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, 
TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
I assume you mean the error message needs to be provided by the implementation of Properties, or some such? (In addition to this being poor code style, with a template in the midst of logic ...)
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
.append("\n\nNote: The 'Queue Component' field in [opsdb|https:
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
The comment comment you need to comment on, though.
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
.append("\n\nNote: The 'Queue Component' field in [opsdb|https:
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
Remember: This is an open source project. It has none of these things mentioned in that comment. I think you shouldn't bother doing something more complicated than removing the comment and removing any mention of a specific system from the message.
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
.append("\n\nNote: The 'Queue Component' field in [opsdb|https:
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
I had a rework of the whole thing in mind, leaving only the detect-problems-and-report part in the DeploymentIssueReporter, and delegating the chains PropertyId (barely ok) --OpsDB--> Classification --JIRA--> issueKey and IssueInfo (with assignee) --OpsDB--> UserContact escalationTarget --JIRA--> void (but escalated issue) to some ```java interface OrganizationDatabase { String fileFor(Application application, Issue issue); void escalateIssue(IssueInfo issueInfo); } ``` or something. Sounds OK, or too much (pointless) work?
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
.append("\n\nNote: The 'Queue Component' field in [opsdb|https:
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
Agree, that sounds good.
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
.append("\n\nNote: The 'Queue Component' field in [opsdb|https:
private void maintainDeploymentIssues(List<Application> applications) { Collection<Application> failingApplications = new ArrayList<>(); for (Application application : applications) if (failingSinceBefore(application.deploymentJobs(), controller().clock().instant().minus(maxFailureAge))) failingApplications.add(application); else controller().applications().setJiraIssueId(application.id(), Optional.empty()); if (failingApplications.size() > 0.2 * applications.size()) { fileOrUpdate(manyFailingDeploymentsIssueFrom(failingApplications)); } else { for (Application application : failingApplications) { Issue deploymentIssue = deploymentIssueFrom(application); Tenant applicationTenant = null; Classification applicationOwner = null; try { applicationTenant= ownerOf(application); applicationOwner = jiraClassificationOf(applicationTenant); fileFor(application, deploymentIssue.with(applicationOwner)); } catch (RuntimeException e) { Pattern componentError = Pattern.compile(".*Component name '.*' is not valid.*", Pattern.DOTALL); if (componentError.matcher(e.getMessage()).matches()) fileFor(application, deploymentIssue .with(applicationOwner.withComponent(null)) .append("\n\nNote: The 'Queue Component' field in [opsdb|https: applicationTenant.getPropertyId().get() + "&action=view] for your property was rejected by JIRA. Please check your spelling.")); else fileFor(application, deploymentIssue.append(e.getMessage() + "\n\nAddressee:\n" + applicationOwner)); } } } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
class DeploymentIssueReporter extends Maintainer { static final Duration maxFailureAge = Duration.ofDays(2); static final Duration maxInactivityAge = Duration.ofDays(4); static final String deploymentFailureLabel = "vespaDeploymentFailure"; static final Classification vespaOps = new Classification("VESPA", "Services", deploymentFailureLabel); static final UserContact terminalUser = new UserContact("frodelu", "Frode Lundgren", admin); private final Contacts contacts; private final Properties properties; private final Issues issues; DeploymentIssueReporter(Controller controller, Contacts contacts, Properties properties, Issues issues, Duration maintenanceInterval, JobControl jobControl) { super(controller, maintenanceInterval, jobControl); this.contacts = contacts; this.properties = properties; this.issues = issues; } @Override protected void maintain() { maintainDeploymentIssues(controller().applications().asList()); escalateInactiveDeploymentIssues(controller().applications().asList()); } /** * File issues for applications which have failed deployment for longer than @maxFailureAge * and store the issue id for the filed issues. Also, clear the @issueIds of applications * where deployment has not failed for this amount of time. */ /** Returns whether @deploymentJobs has a job which has been failing since before @failureThreshold or not. */ private boolean failingSinceBefore(DeploymentJobs deploymentJobs, Instant failureThreshold) { return deploymentJobs.hasFailures() && deploymentJobs.failingSince().isBefore(failureThreshold); } private Tenant ownerOf(Application application) { return controller().tenants().tenant(new TenantId(application.id().tenant().value())).get(); } /** Use the @propertyId of @tenant, if present, to look up JIRA information in OpsDB. 
*/ private Classification jiraClassificationOf(Tenant tenant) { Long propertyId = tenant.getPropertyId().map(PropertyId::value).orElseThrow(() -> new NoSuchElementException("No property id is listed for " + tenant)); Classification classification = properties.classificationFor(propertyId).orElseThrow(() -> new NoSuchElementException("No property was found with id " + propertyId)); return classification.withLabel(deploymentFailureLabel); } /** File @issue for @application, if @application doesn't already have an @Issue associated with it. */ private void fileFor(Application application, Issue issue) { Optional<String> ourIssueId = application.deploymentJobs().jiraIssueId() .filter(jiraIssueId -> issues.fetch(jiraIssueId).status() != done); if ( ! ourIssueId.isPresent()) controller().applications().setJiraIssueId(application.id(), Optional.of(issues.file(issue))); } /** File @issue, or update a JIRA issue representing the same issue. */ private void fileOrUpdate(Issue issue) { Optional<String> jiraIssueId = issues.fetchSimilarTo(issue) .stream().findFirst().map(Issues.IssueInfo::id); if (jiraIssueId.isPresent()) issues.update(jiraIssueId.get(), issue.description()); else issues.file(issue); } /** Escalate JIRA issues for which there has been no activity for a set amount of time. */ private void escalateInactiveDeploymentIssues(List<Application> applications) { applications.forEach(application -> application.deploymentJobs().jiraIssueId().ifPresent(jiraIssueId -> { Issues.IssueInfo issueInfo = issues.fetch(jiraIssueId); if (issueInfo.updated().isBefore(controller().clock().instant().minus(maxInactivityAge))) escalateAndComment(issueInfo, application); })); } /** Reassign the JIRA issue for @application one step up in the OpsDb escalation chain, and add an explanatory comment to it. 
*/ private void escalateAndComment(IssueInfo issueInfo, Application application) { Optional<String> assignee = issueInfo.assignee(); if (assignee.isPresent()) { if (assignee.get().equals(terminalUser.username())) return; issues.addWatcher(issueInfo.id(), assignee.get()); } Long propertyId = ownerOf(application).getPropertyId().get().value(); UserContact escalationTarget = contacts.escalationTargetFor(propertyId, assignee.orElse("no one")); if (escalationTarget.is(assignee.orElse("no one"))) escalationTarget = terminalUser; String comment = deploymentIssueEscalationComment(application, propertyId, assignee.orElse("anyone")); issues.comment(issueInfo.id(), comment); issues.reassign(issueInfo.id(), escalationTarget.username()); } Issue deploymentIssueFrom(Application application) { return new Issue(deploymentIssueSummary(application), deploymentIssueDescription(application)) .with(vespaOps); } Issue manyFailingDeploymentsIssueFrom(Collection<Application> applications) { return new Issue( "More than 20% of Hosted Vespa deployments are failing", applications.stream() .map(application -> "[" + application.id().toShortString() + "|" + toUrl(application.id()) + "]") .collect(Collectors.joining("\n")), vespaOps); } private static String toShortString(ApplicationId id) { return id.tenant().value() + "." + id.application().value() + ( id.instance().isDefault() ? "" : "." 
+ id.instance().value() ); } private String toUrl(ApplicationId applicationId) { return controller().zoneRegistry().getDashboardUri().resolve("/apps" + "/tenant/" + applicationId.tenant().value() + "/application/" + applicationId.application().value()).toString(); } private String toOpsDbUrl(long propertyId) { return contacts.contactsUri(propertyId).toString(); } /** Returns the summary text what will be assigned to a new issue */ private static String deploymentIssueSummary(Application application) { return "[" + toShortString(application.id()) + "] Action required: Repair deployment"; } /** Returns the description text what will be assigned to a new issue */ private String deploymentIssueDescription(Application application) { return "Deployment jobs of the Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "] have been failing " + "continuously for over 48 hours. This blocks any change to this application from being deployed " + "and will also block global rollout of new Vespa versions for everybody.\n\n" + "Please assign your highest priority to fixing this. If you need support, request it using " + "[yo/vespa-support|http: "If this application is not in use, please re-assign this issue to project \"VESPA\" " + "with component \"Services\", and ask for the application to be removed.\n\n" + "If we do not get a response on this issue, we will auto-escalate it."; } /** Returns the comment text that what will be added to an issue each time it is escalated */ private String deploymentIssueEscalationComment(Application application, long propertyId, String priorAssignee) { return "This issue tracks the failing deployment of Vespa application " + "[" + toShortString(application.id()) + "|" + toUrl(application.id()) + "]. " + "Since we have not received a response from " + priorAssignee + ", we are escalating to you, " + "based on [your OpsDb information|" + toOpsDbUrl(propertyId) + "]. 
" + "Please acknowledge this issue and assign somebody to " + "fix it as soon as possible.\n\n" + "If we do not receive a response we will keep auto-escalating this issue. " + "If we run out of escalation options for your OpsDb property, we will assume this application " + "is not managed by anyone and DELETE it. In the meantime, this issue will block global deployment " + "of Vespa for the entire company."; } }
A better test than !isMaster() may be that the task has failed?
private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(context); if (!isMaster() || !task.hasVersionAckDependency()) { log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); tasksPendingStateRecompute.add(task); } return true; } return false; }
if (!isMaster() || !task.hasVersionAckDependency()) {
private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(context); if (!isMaster() || !task.hasVersionAckDependency()) { log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); tasksPendingStateRecompute.add(task); } return true; } return false; }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new 
ArrayList<>(); private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); 
this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new 
SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleLeadershipLost(); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleLeadershipLost(); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); 
stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = 
ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! 
isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || 
cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date 
information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. 
Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new 
ArrayList<>(); private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); 
this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new 
SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleLeadershipLost(); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleLeadershipLost(); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); 
stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = 
ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! 
isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || 
cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date 
information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. 
Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Yes, good point. I'll do this in a follow-up PR after I create a PR for temporarily disabling the version ACK dependency for set-node-state tasks. Need to do some more thinking about how the orchestrator will interact with the controller during leader elections. Not entirely comfortable with how threads will be blocked during the election grace period. The simplest solution might be to just bounce requests with "leader not known" failures if we're in the grace period. The client should then retry round-robin until it ends back up on the leader controller, eventually after its grace period has expired and it's a "proper" leader. Should also consider an explicit request timeout mechanism to avoid hanging threads for unbounded times if distributors in the cluster are failing to converge on a particular state.
/**
 * Polls and executes at most one task from the remote task queue.
 * <p>
 * If this controller is the master and the executed task depends on its resulting
 * cluster state version being ACKed, completion notification is deferred by placing
 * the task on {@code tasksPendingStateRecompute}; otherwise the task is completed
 * immediately.
 *
 * @return true if a task was processed, false if the queue was empty
 */
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false;
    }
    final RemoteClusterControllerTask nextTask = remoteTasks.poll();
    final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    log.finest(() -> String.format("Processing remote task of type '%s'", nextTask.getClass().getName()));
    nextTask.doRemoteFleetControllerTask(taskContext);
    if (isMaster() && nextTask.hasVersionAckDependency()) {
        // Completion is deferred until the state recomputation pass picks the task up.
        log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", nextTask.getClass().getName()));
        tasksPendingStateRecompute.add(nextTask);
    } else {
        log.finest(() -> String.format("Done processing remote task of type '%s'", nextTask.getClass().getName()));
        nextTask.notifyCompleted();
    }
    return true;
}
if (!isMaster() || !task.hasVersionAckDependency()) {
/**
 * Pulls at most one task off the remote task queue and runs it.
 *
 * @return true if a task was processed, false if the queue was empty
 */
private boolean processNextQueuedRemoteTask() {
    if ( ! remoteTasks.isEmpty()) {
        final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
        final RemoteClusterControllerTask task = remoteTasks.poll();
        log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
        task.doRemoteFleetControllerTask(context);
        // A master's version-ACK-dependent task must not be completed until the state
        // version it depends on has been ACKed; it is parked on tasksPendingStateRecompute,
        // which the state recomputation path later moves into the version-gated
        // completion queue (see scheduleVersionDependentTasksForFutureCompletion).
        // Non-masters complete the task right away.
        if (!isMaster() || !task.hasVersionAckDependency()) {
            log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
            task.notifyCompleted();
        } else {
            log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
            tasksPendingStateRecompute.add(task);
        }
        return true;
    }
    return false;
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new 
ArrayList<>(); private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); 
this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new 
SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleLeadershipLost(); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleLeadershipLost(); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); 
stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = 
ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! 
isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || 
cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date 
information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. 
Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new 
ArrayList<>(); private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); 
this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new 
SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleLeadershipLost(); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleLeadershipLost(); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); 
stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = 
ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! 
isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || 
cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date 
information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. 
Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
Bouncing w/retries should work already. timeout sounds right imo, but there should also be timeout on server-side. There was also some mentioning of filled up thread pools, which should also be looked into - or perhaps that works as expected?
private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(context); if (!isMaster() || !task.hasVersionAckDependency()) { log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); tasksPendingStateRecompute.add(task); } return true; } return false; }
if (!isMaster() || !task.hasVersionAckDependency()) {
private boolean processNextQueuedRemoteTask() { if ( ! remoteTasks.isEmpty()) { final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); final RemoteClusterControllerTask task = remoteTasks.poll(); log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(context); if (!isMaster() || !task.hasVersionAckDependency()) { log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); tasksPendingStateRecompute.add(task); } return true; } return false; }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new 
ArrayList<>(); private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); 
this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new 
SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleLeadershipLost(); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleLeadershipLost(); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); 
stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = 
ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! 
isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || 
cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date 
information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. 
Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new 
ArrayList<>(); private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); 
this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new 
SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleLeadershipLost(); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleLeadershipLost(); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); 
stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = 
ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! 
isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || 
cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date 
information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. 
Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
To clarify, my suggestions were all for changes in the behavior of the _cluster controller_, not the orchestrator. I.e. the request bounces and timeout handling would be done server-side. We already check pending tasks per controller tick, so it's straightforward to add an ACK deadline to them. The thread pools, as far as I understand it, are filled up because they use a thread-per-request model. They explicitly block waiting for the completion of the `RemoteClusterControllerTask` instance they have scheduled. Prior to the fix in this PR, requests towards follower controllers would not have their tasks completed. In that respect, it's expected behavior from the pools.
/**
 * Pulls at most one task off the remote task queue and executes it.
 *
 * A task whose effects must be ACKed at a specific cluster state version is only
 * completed immediately when this controller is not the master; on the master it
 * is parked in {@code tasksPendingStateRecompute} and completed later, once the
 * resulting state version has been published.
 *
 * @return true if a task was processed this tick, false if the queue was empty
 */
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false; // No queued work; report an idle tick to the caller.
    }
    final RemoteClusterControllerTask task = remoteTasks.poll();
    final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(taskContext);
    if (isMaster() && task.hasVersionAckDependency()) {
        // Defer completion until the recomputed cluster state version is ACKed.
        log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
        tasksPendingStateRecompute.add(task);
    } else {
        log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    }
    return true;
}
if (!isMaster() || !task.hasVersionAckDependency()) {
/**
 * Processes at most one task from the remote task queue.
 *
 * @return true if a task was taken off the queue and executed, false if the queue was empty
 */
private boolean processNextQueuedRemoteTask() {
    if ( ! remoteTasks.isEmpty()) {
        final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
        final RemoteClusterControllerTask task = remoteTasks.poll();
        log.finest(() -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
        task.doRemoteFleetControllerTask(context);
        // Complete immediately unless we are the master AND the task requires its
        // resulting cluster state version to be ACKed. In particular, a non-master
        // (follower) always completes right away, which releases the request thread
        // that is blocked waiting on this task's completion.
        if (!isMaster() || !task.hasVersionAckDependency()) {
            log.finest(() -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
            task.notifyCompleted();
        } else {
            // Parked here; completion is scheduled once the next state recomputation
            // assigns the version this task depends on.
            log.finest(() -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
            tasksPendingStateRecompute.add(task);
        }
        return true;
    }
    return false;
}
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static Logger log = Logger.getLogger(FleetController.class.getName()); private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new LinkedList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; private long tickStartTime = Long.MAX_VALUE; private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new 
ArrayList<>(); private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override public long getConfigGeneration() { return configGeneration; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) throws Exception { log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName()); this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); 
this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController createForContainer(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { Timer timer = new RealTimer(); return create(options, timer, statusPageServer, null, metricReporter); } public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception { Timer timer = new RealTimer(); RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort); return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter()); } private static FleetController create(FleetControllerOptions options, Timer timer, StatusPageServerInterface statusPageServer, RpcServer rpcServer, MetricReporter metricReporter) throws Exception { MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex); EventLog log = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log); Communicator communicator = new RPCCommunicator( timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new 
SlobrokClient(timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return masterElectionHandler.isMaster(); } } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { log.fine("Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. 
*/ public void addSystemStateListener(SystemStateListener listener) { synchronized (systemStateListeners) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) throw new NullPointerException("Cluster state should never be null at this point"); listener.handleNewSystemState(state); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getHttpPort() { return statusPageServer.getPort(); } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { log.log(LogLevel.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } runner.join(); } log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(this); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options, long configGeneration) { synchronized(monitor) { assert(this.options.fleetControllerIndex == options.fleetControllerIndex); log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options"); nextOptions = options.clone(); nextConfigGeneration = 
configGeneration; monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { verifyInControllerThread(); newStates.add(state); metricUpdater.updateClusterStateMetrics(cluster, state); systemStateBroadcaster.handleNewSystemState(state); if (masterElectionHandler.isMaster()) { storeClusterStateVersionToZooKeeper(state); } } private 
void storeClusterStateVersionToZooKeeper(ClusterState state) { try { database.saveLatestSystemStateVersion(databaseContext, state.getVersion()); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indexes of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. */ public void lostDatabaseConnection() { verifyInControllerThread(); masterElectionHandler.lostDatabaseConnection(); } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleLeadershipLost(); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleLeadershipLost(); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); ClusterState currentState = stateVersionTracker.getVersionedClusterState(); log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true; } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() throws java.io.IOException, ListenFailedException { verifyInControllerThread(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes); cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp); cluster.setMinStorageNodesUp(options.minStorageNodesUp); database.setZooKeeperAddress(options.zooKeeperServerAddress); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); 
stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); configGeneration = nextConfigGeneration; nextConfigGeneration = -1; } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; String hiddenMessage = ""; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = 
ExceptionUtils.getStackTrace(e); log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= stateGatherer.processResponses(this); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= systemStateBroadcaster.processResponses(); if ( ! isRunning()) { return; } if (masterElectionHandler.isMaster()) { didWork |= broadcastClusterStateToEligibleNodes(); didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= processAnyPendingStatusPageRequest(); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } if ( ! 
isRunning()) { return; } didWork |= processNextQueuedRemoteTask(); didWork |= completeSatisfiedVersionDependentTasks(); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } if ( ! didWork && ! waitingForCycle) { monitor.wait(options.cycleWaitTime); } if ( ! isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() throws InterruptedException { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw (InterruptedException) new InterruptedException("Interrupted").initCause(e); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString()); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException { boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || 
cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (currentTime < firstAllowedStateBroadcast) { log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely."); firstAllowedStateBroadcast = currentTime; } sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterState state : newStates) { for(SystemStateListener listener : systemStateListeners) { listener.handleNewSystemState(state); } } newStates.clear(); } } } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentState = consolidatedClusterState(); context.masterInfo = masterElectionHandler; context.nodeStateOrHostInfoChangeHandler = this; context.nodeAddedOrRemovedListener = this; return context; } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state is guaranteed to have up-to-date 
information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = database.loadWantedStates(databaseContext); didWork |= database.loadStartTimestamps(cluster); } didWork |= nodeLookup.updateCluster(cluster, this); didWork |= stateGatherer.sendMessages(cluster, communicator, this); didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); stateVersionTracker.updateLatestCandidateState(candidate); if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) { final long timeNowMs = timer.getCurrentTimeInMillis(); final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); handleNewSystemState(stateVersionTracker.getVersionedClusterState()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a dependency on the provided version argument. 
Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d", task.getClass().getName(), completeAtVersion)); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, final AnnotatedClusterState toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs)); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); } private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); } isMaster = true; if (wantedStateChanged) { database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; metricUpdater.noLongerMaster(); failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; } return didWork; } public void run() { controllerThreadId = Thread.currentThread().getId(); try { processingCycle = true; while( isRunning() ) { tick(); } } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } } public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() { @Override public ContentCluster getCluster() { return cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(long timeoutMS) { long 
endTime = System.currentTimeMillis() + timeoutMS; synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms."); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. */ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeout; synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfo()) { if (node.getSystemStateVersionAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds."); } monitor.wait(10); } } } public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException { long maxTime = System.currentTimeMillis() + timeoutMillis; synchronized (monitor) { while (true) { int distCount = 0, storCount = 0; for (NodeInfo info : cluster.getNodeInfo()) { if (!info.isRpcAddressOutdated()) { if (info.isDistributor()) ++distCount; else 
++storCount; } } if (distCount == distNodeCount && storCount == storNodeCount) return; long remainingTime = maxTime - System.currentTimeMillis(); if (remainingTime <= 0) { throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got " + distCount + " distributors and " + storCount + " storage nodes)"); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
/**
 * The fleet controller (cluster controller) event loop. It tracks content node state (via slobrok
 * lookups and RPC node-state polling), participates in ZooKeeper-backed master election, derives
 * versioned cluster states and broadcasts them to nodes, and answers status-page and RPC requests.
 *
 * Threading: all mutable state is guarded by {@code monitor} (which is the injected timer object —
 * see the constructor). Regular work happens on the single controller thread started by
 * {@link #start()}; {@link #verifyInControllerThread()} asserts this invariant.
 */
class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
                                 Runnable, RemoteClusterControllerTaskScheduler {

    private static Logger log = Logger.getLogger(FleetController.class.getName());

    private final Timer timer;
    // Lock object guarding mutable controller state; aliases the timer instance (set in constructor).
    private final Object monitor;
    private final EventLog eventLog;
    private final NodeLookup nodeLookup;
    private final ContentCluster cluster;
    private final Communicator communicator;
    private final NodeStateGatherer stateGatherer;
    private final StateChangeHandler stateChangeHandler;
    private final SystemStateBroadcaster systemStateBroadcaster;
    private final StateVersionTracker stateVersionTracker;
    private final StatusPageServerInterface statusPageServer;
    private final RpcServer rpcServer;
    private final DatabaseHandler database;
    private final MasterElectionHandler masterElectionHandler;
    private Thread runner = null;
    private AtomicBoolean running = new AtomicBoolean(true);
    private FleetControllerOptions options;
    // Staged options swapped in at the start of the next tick (see switchToNewConfig()).
    private FleetControllerOptions nextOptions;
    private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
    private boolean processingCycle = false;
    private boolean wantedStateChanged = false;
    private long cycleCount = 0;
    private long nextStateSendTime = 0;
    private Long controllerThreadId = null;

    private boolean waitingForCycle = false;
    private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
    // States produced during a tick; published to listeners outside the monitor lock afterwards.
    private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>();
    private long configGeneration = -1;
    private long nextConfigGeneration = -1;
    private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
    private final MetricUpdater metricUpdater;

    private boolean isMaster = false;
    private boolean isStateGatherer = false;
    private long firstAllowedStateBroadcast = Long.MAX_VALUE;
    private long tickStartTime = Long.MAX_VALUE;

    // Tasks waiting for a state recompute; moved to taskCompletionQueue once a version is chosen.
    private List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
    // Invariant (presumably): queue is kept in increasing minimum-version order — TODO confirm.
    private Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();

    // Read-only view of controller data used by status page handlers.
    private final RunDataExtractor dataExtractor = new RunDataExtractor() {
        @Override
        public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
        @Override
        public FleetControllerOptions getOptions() { return options; }
        @Override
        public long getConfigGeneration() { return configGeneration; }
        @Override
        public ContentCluster getCluster() { return cluster; }
    };

    /**
     * Wires together all collaborators, registers status page handlers, and applies the
     * initial options via propagateOptions(). Does not start the controller thread;
     * callers must invoke start() (the static factories do this).
     */
    public FleetController(Timer timer,
                           EventLog eventLog,
                           ContentCluster cluster,
                           NodeStateGatherer nodeStateGatherer,
                           Communicator communicator,
                           StatusPageServerInterface statusPage,
                           RpcServer server,
                           NodeLookup nodeLookup,
                           DatabaseHandler database,
                           StateChangeHandler stateChangeHandler,
                           SystemStateBroadcaster systemStateBroadcaster,
                           MasterElectionHandler masterElectionHandler,
                           MetricUpdater metricUpdater,
                           FleetControllerOptions options) throws Exception {
        log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
        this.timer = timer;
        this.monitor = timer; // the timer doubles as the global lock object
        this.eventLog = eventLog;
        this.options = options;
        this.nodeLookup = nodeLookup;
        this.cluster = cluster;
        this.communicator = communicator;
        this.database = database;
        this.stateGatherer = nodeStateGatherer;
        this.stateChangeHandler = stateChangeHandler;
        this.systemStateBroadcaster = systemStateBroadcaster;
        this.stateVersionTracker = new StateVersionTracker(metricUpdater);
        this.metricUpdater = metricUpdater;

        this.statusPageServer = statusPage;
        this.rpcServer = server;
        this.masterElectionHandler = masterElectionHandler;

        this.statusRequestRouter.addHandler(
                "^/node=([a-z]+)\\.(\\d+)$",
                new LegacyNodePageRequestHandler(timer, eventLog, cluster));
        this.statusRequestRouter.addHandler(
                "^/state.*",
                new NodeHealthRequestHandler(dataExtractor));
        this.statusRequestRouter.addHandler(
                "^/clusterstate",
                new ClusterStateRequestHandler(stateVersionTracker));
        this.statusRequestRouter.addHandler(
                "^/$",
                new LegacyIndexPageRequestHandler(
                        timer, options.showLocalSystemStatesInEventLog, cluster,
                        masterElectionHandler, stateVersionTracker,
                        eventLog, timer.getCurrentTimeInMillis(), dataExtractor));

        propagateOptions();
    }

    /** Factory for running inside a container: external status page server, no RPC server. */
    public static FleetController createForContainer(FleetControllerOptions options,
                                                     StatusPageServerInterface statusPageServer,
                                                     MetricReporter metricReporter) throws Exception {
        Timer timer = new RealTimer();
        return create(options, timer, statusPageServer, null, metricReporter);
    }

    /** Factory for stand-alone operation: owns its RPC server and HTTP status page server. */
    public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
        Timer timer = new RealTimer();
        RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
        StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort);
        return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter());
    }

    /** Builds the full collaborator graph from options and starts the controller thread. */
    private static FleetController create(FleetControllerOptions options,
                                          Timer timer,
                                          StatusPageServerInterface statusPageServer,
                                          RpcServer rpcServer,
                                          MetricReporter metricReporter) throws Exception {
        MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
        EventLog log = new EventLog(timer, metricUpdater);
        ContentCluster cluster = new ContentCluster(
                options.clusterName,
                options.nodes,
                options.storageDistribution,
                options.minStorageNodesUp,
                options.minRatioOfStorageNodesUp);
        NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
        Communicator communicator = new RPCCommunicator(
                timer,
                options.fleetControllerIndex,
                options.nodeStateRequestTimeoutMS,
                options.nodeStateRequestTimeoutEarliestPercentage,
                options.nodeStateRequestTimeoutLatestPercentage,
                options.nodeStateRequestRoundTripTimeMaxSeconds);
        DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
        NodeLookup lookUp = new SlobrokClient(timer);
        StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
        SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
        MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
        FleetController controller = new FleetController(
                timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp,
                database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
        controller.start();
        return controller;
    }

    /** Starts the controller event thread running {@link #run()}. */
    public void start() {
        runner = new Thread(this);
        runner.start();
    }

    /** Returns the lock object guarding controller state (also used for wait/notify). */
    public Object getMonitor() { return monitor; }

    public boolean isRunning() {
        return running.get();
    }

    public boolean isMaster() {
        synchronized (monitor) {
            return masterElectionHandler.isMaster();
        }
    }

    public ClusterState getClusterState() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterState();
        }
    }

    /** Queues a remote task; it is picked up by the controller thread in a later tick. */
    public void schedule(RemoteClusterControllerTask task) {
        synchronized (monitor) {
            log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
            remoteTasks.add(task);
        }
    }

    /** Used for unit testing. Immediately replays the current state to the new listener. */
    public void addSystemStateListener(SystemStateListener listener) {
        synchronized (systemStateListeners) {
            systemStateListeners.add(listener);
            // Always give new listeners the current state
            com.yahoo.vdslib.state.ClusterState state = getSystemState();
            if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
            listener.handleNewSystemState(state);
        }
    }

    /** Returns a defensive copy of the current options. */
    public FleetControllerOptions getOptions() {
        synchronized(monitor) {
            return options.clone();
        }
    }

    /**
     * Returns the last reported state of the given node.
     * @throws IllegalStateException if the node is not part of this cluster.
     */
    public NodeState getReportedNodeState(Node n) {
        synchronized(monitor) {
            NodeInfo node = cluster.getNodeInfo(n);
            if (node == null) {
                throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
            }
            return node.getReportedState();
        }
    }

    // NOTE(review): unlike getReportedNodeState this will NPE (not IllegalStateException)
    // for an unknown node — confirm whether callers rely on that.
    public NodeState getWantedNodeState(Node n) {
        synchronized(monitor) {
            return cluster.getNodeInfo(n).getWantedState();
        }
    }

    /** Returns the currently versioned (published) cluster state. */
    public com.yahoo.vdslib.state.ClusterState getSystemState() {
        synchronized(monitor) {
            return stateVersionTracker.getVersionedClusterState();
        }
    }

    public int getHttpPort() { return statusPageServer.getPort(); }
    public int getRpcPort() { return rpcServer.getPort(); }

    /**
     * Stops the event thread (if running) and shuts down all owned services:
     * database, status page server, RPC server, communicator and node lookup.
     */
    public void shutdown() throws InterruptedException, java.io.IOException {
        if (runner != null && isRunning()) {
            log.log(LogLevel.INFO, "Joining event thread.");
            running.set(false);
            synchronized(monitor) { monitor.notifyAll(); }
            runner.join();
        }
        log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
        // Take over the controller-thread identity so shutdown calls pass verifyInControllerThread().
        controllerThreadId = Thread.currentThread().getId();
        database.shutdown(this);

        if (statusPageServer != null) {
            statusPageServer.shutdown();
        }
        if (rpcServer != null) {
            rpcServer.shutdown();
        }

        communicator.shutdown();
        nodeLookup.shutdown();
    }

    /** Stages new options; they take effect at the start of the next tick. */
    public void updateOptions(FleetControllerOptions options, long configGeneration) {
        synchronized(monitor) {
            assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
            log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
            nextOptions = options.clone();
            nextConfigGeneration = configGeneration;
            monitor.notifyAll();
        }
    }

    /** Asserts that the caller is the controller thread (no-op before the thread has started). */
    private void verifyInControllerThread() {
        if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
            throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
        }
    }

    private ClusterState latestCandidateClusterState() {
        return stateVersionTracker.getLatestCandidateState().getClusterState();
    }

    @Override
    public void handleNewNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
    }

    @Override
    public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        wantedStateChanged = true; // persisted to ZooKeeper on the next master tick
        stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
    }

    @Override
    public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
        verifyInControllerThread();
        stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
    }

    @Override
    public void handleNewNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewNode(node);
    }

    @Override
    public void handleMissingNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
    }

    @Override
    public void handleNewRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewRpcAddress(node);
    }

    @Override
    public void handleReturnedRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleReturnedRpcAddress(node);
    }

    /**
     * Accepts a newly versioned cluster state: queues it for listener propagation,
     * updates metrics, hands it to the broadcaster, and (when master) persists its
     * version to ZooKeeper.
     */
    public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
        verifyInControllerThread();
        newStates.add(state);
        metricUpdater.updateClusterStateMetrics(cluster, state);
        systemStateBroadcaster.handleNewSystemState(state);
        if (masterElectionHandler.isMaster()) {
            storeClusterStateVersionToZooKeeper(state);
        }
    }

    private void storeClusterStateVersionToZooKeeper(ClusterState state) {
        try {
            database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
        } catch (InterruptedException e) {
            // Rethrow as unchecked; interrupting a ZK write mid-flight is treated as fatal here.
            throw new RuntimeException("ZooKeeper write interrupted", e);
        }
    }

    /**
     * This function gives data of the current state in master election.
     * The keys in the given map are indexes of fleet controllers.
     * The values are what fleetcontroller that fleetcontroller wants to
     * become master.
     *
     * If more than half the fleetcontrollers want a node to be master and
     * that node also wants itself as master, that node is the single master.
     * If this condition is not met, there is currently no master.
     */
    public void handleFleetData(Map<Integer, Integer> data) {
        verifyInControllerThread();
        log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
        metricUpdater.updateMasterElectionMetrics(data);
        masterElectionHandler.handleFleetData(data);
    }

    /**
     * Called when we can no longer contact database.
     */
    public void lostDatabaseConnection() {
        verifyInControllerThread();
        masterElectionHandler.lostDatabaseConnection();
    }

    /** Fails and completes every pending version-dependent task; used on leadership loss/shutdown. */
    private void failAllVersionDependentTasks() {
        tasksPendingStateRecompute.forEach(task -> {
            task.handleLeadershipLost();
            task.notifyCompleted();
        });
        tasksPendingStateRecompute.clear();
        taskCompletionQueue.forEach(task -> {
            task.getTask().handleLeadershipLost();
            task.getTask().notifyCompleted();
        });
        taskCompletionQueue.clear();
    }

    /** Called when all distributors have acked newest cluster state version. */
    public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
        Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
        ClusterState currentState = stateVersionTracker.getVersionedClusterState();
        log.fine(() -> String.format("All distributors have ACKed cluster state version %d", currentState.getVersion()));
        stateChangeHandler.handleAllDistributorsInSync(currentState, nodes, database, context);
    }

    /** True if the given node set differs (membership or retired flag) from the configured one. */
    private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
        if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
        if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
        for (ConfiguredNode node : newNodes) {
            if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) return true;
        }
        return false;
    }

    /** This is called when the options field has been set to a new set of options */
    private void propagateOptions() throws java.io.IOException, ListenFailedException {
        verifyInControllerThread();

        if (changesConfiguredNodeSet(options.nodes)) {
            // Force slobrok node re-fetch in case of changes to the set of configured nodes
            cluster.setSlobrokGenerationCount(0);
        }

        communicator.propagateOptions(options);

        if (nodeLookup instanceof SlobrokClient)
            ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
        eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
        cluster.setPollingFrequency(options.statePollingFrequency);
        cluster.setDistribution(options.storageDistribution);
        cluster.setNodes(options.nodes);
        cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
        cluster.setMinStorageNodesUp(options.minStorageNodesUp);
        database.setZooKeeperAddress(options.zooKeeperServerAddress);
        database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
        stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
        stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
        stateChangeHandler.reconfigureFromOptions(options);
        stateChangeHandler.setStateChangedFlag(); // Always trigger state recomputation after reconfig

        masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
        masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);

        if (rpcServer != null) {
            rpcServer.setMasterElectionHandler(masterElectionHandler);
            try{
                rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
            } catch (ListenFailedException e) {
                log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
            }
        }

        if (statusPageServer != null) {
            try{
                statusPageServer.setPort(options.httpPort);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            }
        }

        long currentTime = timer.getCurrentTimeInMillis();
        nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
        configGeneration = nextConfigGeneration;
        nextConfigGeneration = -1;
    }

    /**
     * Routes a status-page HTTP request to the registered handler; on failure builds
     * an HTML error page (stack traces go into the hidden message only).
     */
    public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
        verifyInControllerThread();
        StatusPageResponse.ResponseCode responseCode;
        String message;
        String hiddenMessage = "";
        try {
            StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
            if (handler == null) {
                throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
            }
            return handler.handle(httpRequest);
        } catch (FileNotFoundException e) {
            responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
            message = e.getMessage();
        } catch (Exception e) {
            responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
            message = "Internal Server Error";
            hiddenMessage = ExceptionUtils.getStackTrace(e);
            log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
        }

        TimeZone tz = TimeZone.getTimeZone("UTC");
        long currentTime = timer.getCurrentTimeInMillis();
        StatusPageResponse response = new StatusPageResponse();
        StringBuilder content = new StringBuilder();
        response.setContentType("text/html");
        response.setResponseCode(responseCode);
        content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
        content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
        response.writeHtmlHeader(content, message);
        response.writeHtmlFooter(content, hiddenMessage);
        response.writeContent(content.toString());
        return response;
    }

    /**
     * Runs one iteration of the controller loop under the monitor lock: ZooKeeper work,
     * master election, state gathering/broadcast, status/RPC requests and queued tasks.
     * Sleeps up to options.cycleWaitTime when no work was done. New cluster states are
     * propagated to listeners OUTSIDE the lock at the end.
     */
    public void tick() throws Exception {
        synchronized (monitor) {
            boolean didWork;
            didWork = database.doNextZooKeeperTask(databaseContext);
            didWork |= updateMasterElectionState();
            didWork |= handleLeadershipEdgeTransitions();
            stateChangeHandler.setMaster(isMaster);

            // Checking running inbetween tasks to allow fast shutdown
            if ( ! isRunning()) { return; }
            didWork |= stateGatherer.processResponses(this);
            if ( ! isRunning()) { return; }

            if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
                didWork |= resyncLocallyCachedState();
            } else {
                stepDownAsStateGatherer();
            }

            if ( ! isRunning()) { return; }
            didWork |= systemStateBroadcaster.processResponses();
            if ( ! isRunning()) { return; }
            if (masterElectionHandler.isMaster()) {
                didWork |= broadcastClusterStateToEligibleNodes();
                didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
            }

            if ( ! isRunning()) { return; }
            didWork |= processAnyPendingStatusPageRequest();
            if ( ! isRunning()) { return; }
            if (rpcServer != null) {
                didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
            }

            if ( ! isRunning()) { return; }
            didWork |= processNextQueuedRemoteTask();
            didWork |= completeSatisfiedVersionDependentTasks();

            processingCycle = false;
            ++cycleCount;
            long tickStopTime = timer.getCurrentTimeInMillis();
            if (tickStopTime >= tickStartTime) {
                metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
            }
            // Sleep only when idle, and never while a test is blocked in waitForCompleteCycle().
            if ( ! didWork && ! waitingForCycle) {
                monitor.wait(options.cycleWaitTime);
            }
            if ( ! isRunning()) { return; }
            tickStartTime = timer.getCurrentTimeInMillis();
            processingCycle = true;
            if (nextOptions != null) {
                switchToNewConfig();
            }
        }
        if (isRunning()) {
            propagateNewStatesToListeners();
        }
    }

    private boolean updateMasterElectionState() throws InterruptedException {
        try {
            return masterElectionHandler.watchMasterElection(database, databaseContext);
        } catch (InterruptedException e) {
            // Preserve interruption semantics while re-wrapping with the original as cause.
            throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString());
        }
        return false;
    }

    private void stepDownAsStateGatherer() {
        if (isStateGatherer) {
            cluster.clearStates(); // Remove old states that we are no longer certain of as we stop gathering information
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
        }
        isStateGatherer = false;
    }

    private void switchToNewConfig() {
        options = nextOptions;
        nextOptions = null;
        try {
            propagateOptions();
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
        }
    }

    private boolean processAnyPendingStatusPageRequest() {
        if (statusPageServer != null) {
            StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
            if (statusRequest != null) {
                statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
                return true;
            }
        }
        return false;
    }

    /**
     * Broadcasts the newest state if the initial grace period has passed (or every node has
     * reported in) and the minimum interval between broadcasts has elapsed.
     * @return true if a state was actually sent.
     */
    private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
        boolean sentAny = false;
        long currentTime = timer.getCurrentTimeInMillis();
        if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
            && currentTime >= nextStateSendTime)
        {
            if (currentTime < firstAllowedStateBroadcast) {
                log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
                // Set the timer to set all states just because if all nodes are up, we don't know
                // whether we have gotten info from all nodes yet.
                firstAllowedStateBroadcast = currentTime;
            }
            sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
            if (sentAny) {
                nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
            }
        }
        return sentAny;
    }

    /** Delivers states queued during the tick to listeners; called outside the monitor lock. */
    private void propagateNewStatesToListeners() {
        if ( ! newStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterState state : newStates) {
                    for(SystemStateListener listener : systemStateListeners) {
                        listener.handleNewSystemState(state);
                    }
                }
                newStates.clear();
            }
        }
    }

    private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
        final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
        context.cluster = cluster;
        context.currentState = consolidatedClusterState();
        context.masterInfo = masterElectionHandler;
        context.nodeStateOrHostInfoChangeHandler = this;
        context.nodeAddedOrRemovedListener = this;
        return context;
    }

    /** Completes queued tasks whose minimum state version has been acked by all distributors. */
    private boolean completeSatisfiedVersionDependentTasks() {
        int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
        long queueSizeBefore = taskCompletionQueue.size();
        while (!taskCompletionQueue.isEmpty()) {
            VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
            if (publishedVersion >= taskCompletion.getMinimumVersion()) {
                log.fine(() -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                        taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else {
                break; // head not yet satisfied; assumes queue is version-ordered — TODO confirm
            }
        }
        return (taskCompletionQueue.size() != queueSizeBefore);
    }

    /**
     * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
     * up or down even when the whole cluster is down. The regular, published cluster state is not
     * normally updated to reflect node events when the cluster is down.
     */
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /*
      System test observations:
      - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
      - long time before content node state convergence (though this seems to be the case for legacy impl as well)
     */
    private boolean resyncLocallyCachedState() throws InterruptedException {
        boolean didWork = false;
        // Let non-masters periodically update wanted states and start timestamps from ZooKeeper.
        if ( ! isMaster && cycleCount % 100 == 0) {
            didWork = database.loadWantedStates(databaseContext);
            didWork |= database.loadStartTimestamps(cluster);
        }
        didWork |= nodeLookup.updateCluster(cluster, this);
        didWork |= stateGatherer.sendMessages(cluster, communicator, this);
        didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);

        didWork |= recomputeClusterStateIfRequired();

        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                // Update versions to use so what is shown is closer to what is reality on the master
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            }
        }
        isStateGatherer = true;
        return didWork;
    }

    /**
     * Recomputes the candidate cluster state if anything may have changed, promotes it to a
     * new versioned state when it differs enough (or ZooKeeper supplied a newer version),
     * and schedules pending tasks against the resulting version.
     */
    private boolean recomputeClusterStateIfRequired() {
        boolean stateWasChanged = false;
        if (mustRecomputeCandidateClusterState()) {
            stateChangeHandler.unsetStateChangedFlag();
            final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
            stateVersionTracker.updateLatestCandidateState(candidate);

            if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
            {
                final long timeNowMs = timer.getCurrentTimeInMillis();
                final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();

                stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
                emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
                handleNewSystemState(stateVersionTracker.getVersionedClusterState());
                stateWasChanged = true;
            }
        }
        /*
         * This works transparently for tasks that end up changing the current cluster state (i.e.
         * requiring a new state to be published) and for those whose changes are no-ops (because
         * the changes they request are already part of the current state). In the former case the
         * tasks will depend on the version that was generated based upon them. In the latter case
         * the tasks will depend on the version that is already published (or in the process of
         * being published).
         */
        scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
        return stateWasChanged;
    }

    /**
     * Move tasks that are dependent on the most recently generated state being published into
     * a completion queue with a dependency on the provided version argument. Once that version
     * has been ACKed by all distributors in the system, those tasks will be marked as completed.
     */
    private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
        for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
            log.finest(() -> String.format("Adding task of type '%s' to be completed at version %d",
                    task.getClass().getName(), completeAtVersion));
            taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task));
        }
        tasksPendingStateRecompute.clear();
    }

    private AnnotatedClusterState computeCurrentAnnotatedState() {
        ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
        params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
                .cluster(cluster)
                .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
        return ClusterStateGenerator.generatedStateFrom(params);
    }

    /** Logs node/cluster events describing the delta between the previous and new versioned states. */
    private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
                                                final AnnotatedClusterState toState,
                                                final long timeNowMs) {
        final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
                EventDiffCalculator.params()
                        .cluster(cluster)
                        .fromState(fromState)
                        .toState(toState)
                        .currentTimeMs(timeNowMs));
        for (Event event : deltaEvents) {
            eventLog.add(event, isMaster);
        }

        emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
    }

    private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
                        fromClusterState.getTextualDifference(toClusterState),
                timeNowMs), isMaster);

        if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
            eventLog.add(new ClusterEvent(
                    ClusterEvent.Type.SYSTEMSTATE,
                    "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() +
                            " to " + toClusterState.getDistributionBitCount(),
                    timeNowMs), isMaster);
        }
    }

    private boolean mustRecomputeCandidateClusterState() {
        return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
    }

    /**
     * Handles the became-master / lost-mastership edges: on gaining mastership, reload state
     * from ZooKeeper and delay the first broadcast; on losing it, fail all pending tasks.
     */
    private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
        boolean didWork = false;
        if (masterElectionHandler.isMaster()) {
            if ( ! isMaster) {
                metricUpdater.becameMaster();
                // If we just became master, restore wanted states from database
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                didWork = database.loadStartTimestamps(cluster);
                didWork |= database.loadWantedStates(databaseContext);
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                        + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
                long currentTime = timer.getCurrentTimeInMillis();
                firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
                log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
                        + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            }
            isMaster = true;
            if (wantedStateChanged) {
                database.saveWantedStates(databaseContext);
                wantedStateChanged = false;
            }
        } else {
            if (isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
                firstAllowedStateBroadcast = Long.MAX_VALUE;
                metricUpdater.noLongerMaster();
                failAllVersionDependentTasks();
            }
            wantedStateChanged = false;
            isMaster = false;
        }
        return didWork;
    }

    /**
     * Controller-thread main loop: ticks until stopped. Fatal errors terminate the JVM
     * via System.exit(1); cleanup always fails pending tasks and wakes waiters.
     */
    public void run() {
        controllerThreadId = Thread.currentThread().getId();
        try {
            processingCycle = true;
            while( isRunning() ) {
                tick();
            }
        } catch (InterruptedException e) {
            log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
        } catch (Throwable t) {
            t.printStackTrace();
            log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
            synchronized (monitor) { running.set(false); }
            System.exit(1);
        } finally {
            running.set(false);
            failAllVersionDependentTasks();
            synchronized (monitor) { monitor.notifyAll(); }
        }
    }

    // Adapter exposing this controller's cluster and listeners to the database layer.
    public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
        @Override
        public ContentCluster getCluster() { return cluster; }
        @Override
        public FleetController getFleetController() { return FleetController.this; }
        @Override
        public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
        @Override
        public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
    };

    /**
     * Blocks until at least one full tick has completed after this call (two if a tick is
     * currently in progress). Test helper.
     * @throws IllegalStateException on timeout or if the controller is not running.
     */
    public void waitForCompleteCycle(long timeoutMS) {
        long endTime = System.currentTimeMillis() + timeoutMS;
        synchronized (monitor) {
            // +1 for processing cycle count, +1 to get beyond a cycle that might be very close to done.
            long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
            waitingForCycle = true;
            try{
                while (cycleCount < wantedCycle) {
                    if (System.currentTimeMillis() > endTime)
                        throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                    if ( !isRunning() )
                        throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                    try{ monitor.wait(100); } catch (InterruptedException e) {}
                }
            } finally {
                waitingForCycle = false;
            }
        }
    }

    /**
     * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
     * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
     * live performance to remove a non-problem.
     */
    public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeout;
        synchronized (monitor) {
            while (true) {
                int ackedNodes = 0;
                for (NodeInfo node : cluster.getNodeInfo()) {
                    if (node.getSystemStateVersionAcknowledged() >= version) {
                        ++ackedNodes;
                    }
                }
                if (ackedNodes >= nodeCount) {
                    log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                    return;
                }
                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
                }
                monitor.wait(10);
            }
        }
    }

    /** Test helper: waits until the given numbers of distributors and storage nodes are visible in slobrok. */
    public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
        long maxTime = System.currentTimeMillis() + timeoutMillis;
        synchronized (monitor) {
            while (true) {
                int distCount = 0, storCount = 0;
                for (NodeInfo info : cluster.getNodeInfo()) {
                    if (!info.isRpcAddressOutdated()) {
                        if (info.isDistributor()) ++distCount;
                        else ++storCount;
                    }
                }
                if (distCount == distNodeCount && storCount == storNodeCount) return;

                long remainingTime = maxTime - System.currentTimeMillis();
                if (remainingTime <= 0) {
                    throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                            + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                            + distCount + " distributors and " + storCount + " storage nodes)");
                }
                monitor.wait(10);
            }
        }
    }

    public boolean hasZookeeperConnection() { return !database.isClosed(); }

    // Following functions are for testing only
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
    public ContentCluster getCluster() { return cluster; }
    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
    public EventLog getEventLog() { return eventLog; }
}
NOTE: this shutdown method has been renamed; it is no longer called `deconstruct`.
/**
 * Stops the converge loop: shuts down the spec-verifier scheduler, marks this updater
 * terminated, wakes the tick thread, and blocks until both the tick thread and the
 * scheduler have fully terminated before stopping the node admin.
 *
 * @throws RuntimeException if stop() has already been called
 */
public void stop() {
    specVerifierScheduler.shutdown();
    // Fail loudly on double-stop instead of silently joining an already-stopped thread.
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    do {
        try {
            // join() replaces join(0): both wait indefinitely, the explicit 0 only
            // obscured that. The stale "Deconstruct called/complete" log lines are
            // dropped — this method is stop(), not deconstruct().
            loopThread.join();
            specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e1) {
            log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown");
        }
    } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated());
    nodeAdmin.stop();
}
log.log(LogLevel.INFO, objectToString() + ": Deconstruct called");
/**
 * Shuts down the spec-verifier scheduler and the tick loop, waits for both to
 * terminate completely, then stops the node admin.
 *
 * @throws RuntimeException if this updater was already stopped
 */
public void stop() {
    specVerifierScheduler.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    // Keep waiting until both the tick thread and the scheduler are fully done,
    // retrying the join/await if we are interrupted.
    while (true) {
        try {
            loopThread.join();
            specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException ignored) {
            log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown");
        }
        if (!loopThread.isAlive() && specVerifierScheduler.isTerminated()) {
            break;
        }
    }
    nodeAdmin.stop();
}
/**
 * Drives node-admin towards a wanted {@link State}: freezing/unfreezing the node agents,
 * suspending/resuming through the Orchestrator, and refreshing the list of containers to
 * run from the node repository.
 *
 * <p>Mutable state (currentState, wantedState, workToDoNow, lastTick) is guarded by
 * {@code monitor}; {@link #start()} spawns a thread that runs {@link #tick()} in a loop.
 */
class NodeAdminStateUpdater {
    // If freezing has not converged within this duration, tick() forces unfrozen ticks.
    static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);

    private final AtomicBoolean terminated = new AtomicBoolean(false);
    private State currentState = SUSPENDED_NODE_ADMIN;
    private State wantedState = RESUMED;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();
    private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
    // Single daemon thread used to report hardware divergence; scheduled in the constructor.
    private final ScheduledExecutorService specVerifierScheduler =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier"));

    private Thread loopThread;

    private final NodeRepository nodeRepository;
    private final NodeAdmin nodeAdmin;
    private final Clock clock;
    private final Orchestrator orchestrator;
    private final String dockerHostHostName;
    private final Duration nodeAdminConvergeStateInterval;
    private Instant lastTick;

    public NodeAdminStateUpdater(
            NodeRepository nodeRepository,
            Orchestrator orchestrator,
            StorageMaintainer storageMaintainer,
            NodeAdmin nodeAdmin,
            String dockerHostHostName,
            Clock clock,
            Duration nodeAdminConvergeStateInterval) {
        log.log(LogLevel.INFO, objectToString() + ": Creating object");
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.nodeAdmin = nodeAdmin;
        this.dockerHostHostName = dockerHostHostName;
        this.clock = clock;
        this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval;
        this.lastTick = clock.instant();
        // Report hardware divergence every 60 minutes, first run after 5 minutes.
        specVerifierScheduler.scheduleWithFixedDelay(
                () -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);
    }

    // Identity string (class name + identity hash) used in lifecycle log messages.
    private String objectToString() {
        return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this));
    }

    public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }

    /** Returns a snapshot of internal state for the debug/status page. */
    public Map<String, Object> getDebugPage() {
        Map<String, Object> debug = new LinkedHashMap<>();
        synchronized (monitor) {
            debug.put("dockerHostHostName", dockerHostHostName);
            debug.put("NodeAdmin", nodeAdmin.debugInfo());
            debug.put("Wanted State: ", wantedState);
            debug.put("Current State: ", currentState);
        }
        return debug;
    }

    /** Reports hardware divergence to the node repository; no-op unless RESUMED. */
    private void updateHardwareDivergence(StorageMaintainer maintainer) {
        // NOTE(review): currentState is read here without holding 'monitor' — confirm
        // this racy read is benign.
        if (currentState != RESUMED) return;
        try {
            String hardwareDivergence = maintainer.getHardwareDivergence();
            NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence);
            nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to report hardware divergence", e);
        }
    }

    /**
     * Records a new wanted state (waking the tick thread if it changed) and returns
     * whether the current state already equals the wanted state.
     */
    public boolean setResumeStateAndCheckIfResumed(State wantedState) {
        synchronized (monitor) {
            if (this.wantedState != wantedState) {
                log.info("Wanted state change: " + this.wantedState + " -> " + wantedState);
                this.wantedState = wantedState;
                signalWorkToBeDone();
            }
            return currentState == wantedState;
        }
    }

    /** Wakes the tick thread so the next converge pass starts immediately. */
    void signalWorkToBeDone() {
        synchronized (monitor) {
            if (! workToDoNow) {
                workToDoNow = true;
                monitor.notifyAll();
            }
        }
    }

    /**
     * One converge pass: waits until either work is signalled or the converge interval
     * has elapsed, then tries to converge to the wanted state and refreshes the
     * containers to run.
     */
    void tick() {
        State wantedStateCopy;
        synchronized (monitor) {
            // Sleep until signalled or until nodeAdminConvergeStateInterval has passed.
            while (! workToDoNow) {
                Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant());
                long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        log.info("Interrupted, but ignoring this: NodeAdminStateUpdater");
                    }
                } else break;
            }
            lastTick = clock.instant();
            workToDoNow = false;
            wantedStateCopy = this.wantedState;
        }

        try {
            convergeState(wantedStateCopy);
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected while convergence is still in progress; retried next tick.
            log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e);
        }

        // If freezing has been stuck for too long, give up and force unfrozen ticks.
        if (wantedStateCopy != RESUMED && currentState == RESUMED) {
            Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration();
            if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
                log.info("Timed out trying to freeze, will force unfreezed ticks");
                nodeAdmin.setFrozen(false);
            }
        }

        fetchContainersToRunFromNodeRepository();
    }

    /**
     * This method attempts to converge node-admin w/agents to a {@link State}
     * with respect to: freeze, Orchestrator, and services running.
     */
    private void convergeState(State wantedState) {
        if (currentState == wantedState) {
            return;
        }
        boolean wantFrozen = wantedState != RESUMED;
        if (!nodeAdmin.setFrozen(wantFrozen)) {
            throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
        }

        switch (wantedState) {
            case RESUMED:
                orchestrator.resume(dockerHostHostName);
                break;
            case SUSPENDED_NODE_ADMIN:
                orchestrator.suspend(dockerHostHostName);
                break;
            case SUSPENDED:
                // Suspend the host together with all its active nodes, then stop their services.
                List<String> nodesInActiveState = getNodesInActiveState();
                List<String> nodesToSuspend = new ArrayList<>();
                nodesToSuspend.addAll(nodesInActiveState);
                nodesToSuspend.add(dockerHostHostName);
                orchestrator.suspend(dockerHostHostName, nodesToSuspend);
                nodeAdmin.stopNodeAgentServices(nodesInActiveState);
                break;
            default:
                throw new IllegalStateException("Unknown wanted state " + wantedState);
        }

        log.info("State changed from " + currentState + " to " + wantedState);
        synchronized (monitor) {
            currentState = wantedState;
        }
    }

    /** Fetches the containers to run from the node repository and hands them to node-admin. */
    private void fetchContainersToRunFromNodeRepository() {
        synchronized (monitor) {
            if (currentState != RESUMED) {
                log.info("Frozen, skipping fetching info from node repository");
                return;
            }
            final List<ContainerNodeSpec> containersToRun;
            try {
                containersToRun = nodeRepository.getContainersToRun(dockerHostHostName);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e);
                return;
            }
            if (containersToRun == null) {
                log.warning("Got null from node repository");
                return;
            }
            try {
                nodeAdmin.refreshContainersToRun(containersToRun);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed updating node admin: ", e);
            }
        }
    }

    /** Returns hostnames of this host's nodes that are in the active state. */
    private List<String> getNodesInActiveState() {
        try {
            return nodeRepository.getContainersToRun(dockerHostHostName)
                    .stream()
                    .filter(nodespec -> nodespec.nodeState == Node.State.active)
                    .map(nodespec -> nodespec.hostname)
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException("Failed to get nodes from node repo", e);
        }
    }

    /** Starts the tick loop thread. May only be called once. */
    public void start() {
        if (loopThread != null) {
            throw new RuntimeException("Can not restart NodeAdminStateUpdater");
        }
        loopThread = new Thread(() -> {
            while (! terminated.get()) tick();
        });
        loopThread.setName("tick-NodeAdminStateUpdater");
        loopThread.start();
    }
}
/**
 * Drives node-admin towards a wanted {@link State}: freezing/unfreezing the node agents,
 * suspending/resuming through the Orchestrator, and refreshing the list of containers to
 * run from the node repository.
 *
 * <p>Mutable state (currentState, wantedState, workToDoNow, lastTick) is guarded by
 * {@code monitor}; {@link #start()} spawns a thread that runs {@link #tick()} in a loop
 * and also schedules periodic hardware-divergence reporting.
 */
class NodeAdminStateUpdater {
    // If freezing has not converged within this duration, tick() forces unfrozen ticks.
    static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);

    private final AtomicBoolean terminated = new AtomicBoolean(false);
    private State currentState = SUSPENDED_NODE_ADMIN;
    private State wantedState = RESUMED;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();
    private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
    // Single daemon thread used to report hardware divergence; scheduled in start().
    private final ScheduledExecutorService specVerifierScheduler =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier"));

    private Thread loopThread;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final StorageMaintainer storageMaintainer;
    private final NodeAdmin nodeAdmin;
    private final Clock clock;
    private final String dockerHostHostName;
    private final Duration nodeAdminConvergeStateInterval;
    private Instant lastTick;

    public NodeAdminStateUpdater(
            NodeRepository nodeRepository,
            Orchestrator orchestrator,
            StorageMaintainer storageMaintainer,
            NodeAdmin nodeAdmin,
            String dockerHostHostName,
            Clock clock,
            Duration nodeAdminConvergeStateInterval) {
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.storageMaintainer = storageMaintainer;
        this.nodeAdmin = nodeAdmin;
        this.dockerHostHostName = dockerHostHostName;
        this.clock = clock;
        this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval;
        this.lastTick = clock.instant();
    }

    public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }

    /** Returns a snapshot of internal state for the debug/status page. */
    public Map<String, Object> getDebugPage() {
        Map<String, Object> debug = new LinkedHashMap<>();
        synchronized (monitor) {
            debug.put("dockerHostHostName", dockerHostHostName);
            debug.put("NodeAdmin", nodeAdmin.debugInfo());
            debug.put("Wanted State: ", wantedState);
            debug.put("Current State: ", currentState);
        }
        return debug;
    }

    /** Reports hardware divergence to the node repository; no-op unless RESUMED. */
    private void updateHardwareDivergence(StorageMaintainer maintainer) {
        // NOTE(review): currentState is read here without holding 'monitor' — confirm
        // this racy read is benign.
        if (currentState != RESUMED) return;
        try {
            String hardwareDivergence = maintainer.getHardwareDivergence();
            NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence);
            nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to report hardware divergence", e);
        }
    }

    /**
     * Records a new wanted state (waking the tick thread if it changed) and returns
     * whether the current state already equals the wanted state.
     */
    public boolean setResumeStateAndCheckIfResumed(State wantedState) {
        synchronized (monitor) {
            if (this.wantedState != wantedState) {
                log.info("Wanted state change: " + this.wantedState + " -> " + wantedState);
                this.wantedState = wantedState;
                signalWorkToBeDone();
            }
            return currentState == wantedState;
        }
    }

    /** Wakes the tick thread so the next converge pass starts immediately. */
    void signalWorkToBeDone() {
        synchronized (monitor) {
            if (! workToDoNow) {
                workToDoNow = true;
                monitor.notifyAll();
            }
        }
    }

    /**
     * One converge pass: waits until either work is signalled or the converge interval
     * has elapsed, then tries to converge to the wanted state and refreshes the
     * containers to run.
     */
    void tick() {
        State wantedStateCopy;
        synchronized (monitor) {
            // Sleep until signalled or until nodeAdminConvergeStateInterval has passed.
            while (! workToDoNow) {
                Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant());
                long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        log.info("Interrupted, but ignoring this: NodeAdminStateUpdater");
                    }
                } else break;
            }
            lastTick = clock.instant();
            workToDoNow = false;
            wantedStateCopy = this.wantedState;
        }

        try {
            convergeState(wantedStateCopy);
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected while convergence is still in progress; retried next tick.
            log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e);
        }

        // If freezing has been stuck for too long, give up and force unfrozen ticks.
        if (wantedStateCopy != RESUMED && currentState == RESUMED) {
            Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration();
            if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
                log.info("Timed out trying to freeze, will force unfreezed ticks");
                nodeAdmin.setFrozen(false);
            }
        }

        fetchContainersToRunFromNodeRepository();
    }

    /**
     * This method attempts to converge node-admin w/agents to a {@link State}
     * with respect to: freeze, Orchestrator, and services running.
     */
    private void convergeState(State wantedState) {
        if (currentState == wantedState) {
            return;
        }
        boolean wantFrozen = wantedState != RESUMED;
        if (!nodeAdmin.setFrozen(wantFrozen)) {
            throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
        }

        switch (wantedState) {
            case RESUMED:
                orchestrator.resume(dockerHostHostName);
                break;
            case SUSPENDED_NODE_ADMIN:
                orchestrator.suspend(dockerHostHostName);
                break;
            case SUSPENDED:
                // Suspend the host together with all its active nodes, then stop their services.
                List<String> nodesInActiveState = getNodesInActiveState();
                List<String> nodesToSuspend = new ArrayList<>();
                nodesToSuspend.addAll(nodesInActiveState);
                nodesToSuspend.add(dockerHostHostName);
                orchestrator.suspend(dockerHostHostName, nodesToSuspend);
                nodeAdmin.stopNodeAgentServices(nodesInActiveState);
                break;
            default:
                throw new IllegalStateException("Unknown wanted state " + wantedState);
        }

        log.info("State changed from " + currentState + " to " + wantedState);
        synchronized (monitor) {
            currentState = wantedState;
        }
    }

    /** Fetches the containers to run from the node repository and hands them to node-admin. */
    private void fetchContainersToRunFromNodeRepository() {
        synchronized (monitor) {
            if (currentState != RESUMED) {
                log.info("Frozen, skipping fetching info from node repository");
                return;
            }
            final List<ContainerNodeSpec> containersToRun;
            try {
                containersToRun = nodeRepository.getContainersToRun(dockerHostHostName);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e);
                return;
            }
            if (containersToRun == null) {
                log.warning("Got null from node repository");
                return;
            }
            try {
                nodeAdmin.refreshContainersToRun(containersToRun);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed updating node admin: ", e);
            }
        }
    }

    /** Returns hostnames of this host's nodes that are in the active state. */
    private List<String> getNodesInActiveState() {
        try {
            return nodeRepository.getContainersToRun(dockerHostHostName)
                    .stream()
                    .filter(nodespec -> nodespec.nodeState == Node.State.active)
                    .map(nodespec -> nodespec.hostname)
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException("Failed to get nodes from node repo", e);
        }
    }

    /** Starts the tick loop thread and the divergence reporter. May only be called once. */
    public void start() {
        if (loopThread != null) {
            throw new RuntimeException("Can not restart NodeAdminStateUpdater");
        }
        loopThread = new Thread(() -> {
            while (! terminated.get()) tick();
        });
        loopThread.setName("tick-NodeAdminStateUpdater");
        loopThread.start();
        // Report hardware divergence every 60 minutes, first run after 5 minutes.
        specVerifierScheduler.scheduleWithFixedDelay(
                () -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);
    }
}
Use just join() with no argument — join(0) also waits indefinitely, so the explicit 0 is redundant and misleading.
/**
 * Stops the converge loop: shuts down the spec-verifier scheduler, marks this updater
 * terminated, wakes the tick thread, and blocks until both the tick thread and the
 * scheduler have fully terminated before stopping the node admin.
 *
 * @throws RuntimeException if stop() has already been called
 */
public void stop() {
    specVerifierScheduler.shutdown();
    // Fail loudly on double-stop instead of silently joining an already-stopped thread.
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    do {
        try {
            // join() replaces join(0): both wait indefinitely, the explicit 0 only
            // obscured that. The stale "Deconstruct called/complete" log lines are
            // dropped — this method is stop(), not deconstruct().
            loopThread.join();
            specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e1) {
            log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown");
        }
    } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated());
    nodeAdmin.stop();
}
loopThread.join(0);
/**
 * Shuts down the spec-verifier scheduler and the tick loop, waits for both to
 * terminate completely, then stops the node admin.
 *
 * @throws RuntimeException if this updater was already stopped
 */
public void stop() {
    specVerifierScheduler.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    // Keep waiting until both the tick thread and the scheduler are fully done,
    // retrying the join/await if we are interrupted.
    while (true) {
        try {
            loopThread.join();
            specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException ignored) {
            log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown");
        }
        if (!loopThread.isAlive() && specVerifierScheduler.isTerminated()) {
            break;
        }
    }
    nodeAdmin.stop();
}
/**
 * Drives node-admin towards a wanted {@link State}: freezing/unfreezing the node agents,
 * suspending/resuming through the Orchestrator, and refreshing the list of containers to
 * run from the node repository.
 *
 * <p>Mutable state (currentState, wantedState, workToDoNow, lastTick) is guarded by
 * {@code monitor}; {@link #start()} spawns a thread that runs {@link #tick()} in a loop.
 */
class NodeAdminStateUpdater {
    // If freezing has not converged within this duration, tick() forces unfrozen ticks.
    static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);

    private final AtomicBoolean terminated = new AtomicBoolean(false);
    private State currentState = SUSPENDED_NODE_ADMIN;
    private State wantedState = RESUMED;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();
    private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
    // Single daemon thread used to report hardware divergence; scheduled in the constructor.
    private final ScheduledExecutorService specVerifierScheduler =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier"));

    private Thread loopThread;

    private final NodeRepository nodeRepository;
    private final NodeAdmin nodeAdmin;
    private final Clock clock;
    private final Orchestrator orchestrator;
    private final String dockerHostHostName;
    private final Duration nodeAdminConvergeStateInterval;
    private Instant lastTick;

    public NodeAdminStateUpdater(
            NodeRepository nodeRepository,
            Orchestrator orchestrator,
            StorageMaintainer storageMaintainer,
            NodeAdmin nodeAdmin,
            String dockerHostHostName,
            Clock clock,
            Duration nodeAdminConvergeStateInterval) {
        log.log(LogLevel.INFO, objectToString() + ": Creating object");
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.nodeAdmin = nodeAdmin;
        this.dockerHostHostName = dockerHostHostName;
        this.clock = clock;
        this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval;
        this.lastTick = clock.instant();
        // Report hardware divergence every 60 minutes, first run after 5 minutes.
        specVerifierScheduler.scheduleWithFixedDelay(
                () -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);
    }

    // Identity string (class name + identity hash) used in lifecycle log messages.
    private String objectToString() {
        return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this));
    }

    public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }

    /** Returns a snapshot of internal state for the debug/status page. */
    public Map<String, Object> getDebugPage() {
        Map<String, Object> debug = new LinkedHashMap<>();
        synchronized (monitor) {
            debug.put("dockerHostHostName", dockerHostHostName);
            debug.put("NodeAdmin", nodeAdmin.debugInfo());
            debug.put("Wanted State: ", wantedState);
            debug.put("Current State: ", currentState);
        }
        return debug;
    }

    /** Reports hardware divergence to the node repository; no-op unless RESUMED. */
    private void updateHardwareDivergence(StorageMaintainer maintainer) {
        // NOTE(review): currentState is read here without holding 'monitor' — confirm
        // this racy read is benign.
        if (currentState != RESUMED) return;
        try {
            String hardwareDivergence = maintainer.getHardwareDivergence();
            NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence);
            nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to report hardware divergence", e);
        }
    }

    /**
     * Records a new wanted state (waking the tick thread if it changed) and returns
     * whether the current state already equals the wanted state.
     */
    public boolean setResumeStateAndCheckIfResumed(State wantedState) {
        synchronized (monitor) {
            if (this.wantedState != wantedState) {
                log.info("Wanted state change: " + this.wantedState + " -> " + wantedState);
                this.wantedState = wantedState;
                signalWorkToBeDone();
            }
            return currentState == wantedState;
        }
    }

    /** Wakes the tick thread so the next converge pass starts immediately. */
    void signalWorkToBeDone() {
        synchronized (monitor) {
            if (! workToDoNow) {
                workToDoNow = true;
                monitor.notifyAll();
            }
        }
    }

    /**
     * One converge pass: waits until either work is signalled or the converge interval
     * has elapsed, then tries to converge to the wanted state and refreshes the
     * containers to run.
     */
    void tick() {
        State wantedStateCopy;
        synchronized (monitor) {
            // Sleep until signalled or until nodeAdminConvergeStateInterval has passed.
            while (! workToDoNow) {
                Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant());
                long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        log.info("Interrupted, but ignoring this: NodeAdminStateUpdater");
                    }
                } else break;
            }
            lastTick = clock.instant();
            workToDoNow = false;
            wantedStateCopy = this.wantedState;
        }

        try {
            convergeState(wantedStateCopy);
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected while convergence is still in progress; retried next tick.
            log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e);
        }

        // If freezing has been stuck for too long, give up and force unfrozen ticks.
        if (wantedStateCopy != RESUMED && currentState == RESUMED) {
            Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration();
            if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
                log.info("Timed out trying to freeze, will force unfreezed ticks");
                nodeAdmin.setFrozen(false);
            }
        }

        fetchContainersToRunFromNodeRepository();
    }

    /**
     * This method attempts to converge node-admin w/agents to a {@link State}
     * with respect to: freeze, Orchestrator, and services running.
     */
    private void convergeState(State wantedState) {
        if (currentState == wantedState) {
            return;
        }
        boolean wantFrozen = wantedState != RESUMED;
        if (!nodeAdmin.setFrozen(wantFrozen)) {
            throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
        }

        switch (wantedState) {
            case RESUMED:
                orchestrator.resume(dockerHostHostName);
                break;
            case SUSPENDED_NODE_ADMIN:
                orchestrator.suspend(dockerHostHostName);
                break;
            case SUSPENDED:
                // Suspend the host together with all its active nodes, then stop their services.
                List<String> nodesInActiveState = getNodesInActiveState();
                List<String> nodesToSuspend = new ArrayList<>();
                nodesToSuspend.addAll(nodesInActiveState);
                nodesToSuspend.add(dockerHostHostName);
                orchestrator.suspend(dockerHostHostName, nodesToSuspend);
                nodeAdmin.stopNodeAgentServices(nodesInActiveState);
                break;
            default:
                throw new IllegalStateException("Unknown wanted state " + wantedState);
        }

        log.info("State changed from " + currentState + " to " + wantedState);
        synchronized (monitor) {
            currentState = wantedState;
        }
    }

    /** Fetches the containers to run from the node repository and hands them to node-admin. */
    private void fetchContainersToRunFromNodeRepository() {
        synchronized (monitor) {
            if (currentState != RESUMED) {
                log.info("Frozen, skipping fetching info from node repository");
                return;
            }
            final List<ContainerNodeSpec> containersToRun;
            try {
                containersToRun = nodeRepository.getContainersToRun(dockerHostHostName);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e);
                return;
            }
            if (containersToRun == null) {
                log.warning("Got null from node repository");
                return;
            }
            try {
                nodeAdmin.refreshContainersToRun(containersToRun);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed updating node admin: ", e);
            }
        }
    }

    /** Returns hostnames of this host's nodes that are in the active state. */
    private List<String> getNodesInActiveState() {
        try {
            return nodeRepository.getContainersToRun(dockerHostHostName)
                    .stream()
                    .filter(nodespec -> nodespec.nodeState == Node.State.active)
                    .map(nodespec -> nodespec.hostname)
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException("Failed to get nodes from node repo", e);
        }
    }

    /** Starts the tick loop thread. May only be called once. */
    public void start() {
        if (loopThread != null) {
            throw new RuntimeException("Can not restart NodeAdminStateUpdater");
        }
        loopThread = new Thread(() -> {
            while (! terminated.get()) tick();
        });
        loopThread.setName("tick-NodeAdminStateUpdater");
        loopThread.start();
    }
}
/**
 * Drives node-admin towards a wanted {@link State}: freezing/unfreezing the node agents,
 * suspending/resuming through the Orchestrator, and refreshing the list of containers to
 * run from the node repository.
 *
 * <p>Mutable state (currentState, wantedState, workToDoNow, lastTick) is guarded by
 * {@code monitor}; {@link #start()} spawns a thread that runs {@link #tick()} in a loop
 * and also schedules periodic hardware-divergence reporting.
 */
class NodeAdminStateUpdater {
    // If freezing has not converged within this duration, tick() forces unfrozen ticks.
    static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);

    private final AtomicBoolean terminated = new AtomicBoolean(false);
    private State currentState = SUSPENDED_NODE_ADMIN;
    private State wantedState = RESUMED;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();
    private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
    // Single daemon thread used to report hardware divergence; scheduled in start().
    private final ScheduledExecutorService specVerifierScheduler =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier"));

    private Thread loopThread;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final StorageMaintainer storageMaintainer;
    private final NodeAdmin nodeAdmin;
    private final Clock clock;
    private final String dockerHostHostName;
    private final Duration nodeAdminConvergeStateInterval;
    private Instant lastTick;

    public NodeAdminStateUpdater(
            NodeRepository nodeRepository,
            Orchestrator orchestrator,
            StorageMaintainer storageMaintainer,
            NodeAdmin nodeAdmin,
            String dockerHostHostName,
            Clock clock,
            Duration nodeAdminConvergeStateInterval) {
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.storageMaintainer = storageMaintainer;
        this.nodeAdmin = nodeAdmin;
        this.dockerHostHostName = dockerHostHostName;
        this.clock = clock;
        this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval;
        this.lastTick = clock.instant();
    }

    public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }

    /** Returns a snapshot of internal state for the debug/status page. */
    public Map<String, Object> getDebugPage() {
        Map<String, Object> debug = new LinkedHashMap<>();
        synchronized (monitor) {
            debug.put("dockerHostHostName", dockerHostHostName);
            debug.put("NodeAdmin", nodeAdmin.debugInfo());
            debug.put("Wanted State: ", wantedState);
            debug.put("Current State: ", currentState);
        }
        return debug;
    }

    /** Reports hardware divergence to the node repository; no-op unless RESUMED. */
    private void updateHardwareDivergence(StorageMaintainer maintainer) {
        // NOTE(review): currentState is read here without holding 'monitor' — confirm
        // this racy read is benign.
        if (currentState != RESUMED) return;
        try {
            String hardwareDivergence = maintainer.getHardwareDivergence();
            NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence);
            nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to report hardware divergence", e);
        }
    }

    /**
     * Records a new wanted state (waking the tick thread if it changed) and returns
     * whether the current state already equals the wanted state.
     */
    public boolean setResumeStateAndCheckIfResumed(State wantedState) {
        synchronized (monitor) {
            if (this.wantedState != wantedState) {
                log.info("Wanted state change: " + this.wantedState + " -> " + wantedState);
                this.wantedState = wantedState;
                signalWorkToBeDone();
            }
            return currentState == wantedState;
        }
    }

    /** Wakes the tick thread so the next converge pass starts immediately. */
    void signalWorkToBeDone() {
        synchronized (monitor) {
            if (! workToDoNow) {
                workToDoNow = true;
                monitor.notifyAll();
            }
        }
    }

    /**
     * One converge pass: waits until either work is signalled or the converge interval
     * has elapsed, then tries to converge to the wanted state and refreshes the
     * containers to run.
     */
    void tick() {
        State wantedStateCopy;
        synchronized (monitor) {
            // Sleep until signalled or until nodeAdminConvergeStateInterval has passed.
            while (! workToDoNow) {
                Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant());
                long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        log.info("Interrupted, but ignoring this: NodeAdminStateUpdater");
                    }
                } else break;
            }
            lastTick = clock.instant();
            workToDoNow = false;
            wantedStateCopy = this.wantedState;
        }

        try {
            convergeState(wantedStateCopy);
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected while convergence is still in progress; retried next tick.
            log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e);
        }

        // If freezing has been stuck for too long, give up and force unfrozen ticks.
        if (wantedStateCopy != RESUMED && currentState == RESUMED) {
            Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration();
            if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
                log.info("Timed out trying to freeze, will force unfreezed ticks");
                nodeAdmin.setFrozen(false);
            }
        }

        fetchContainersToRunFromNodeRepository();
    }

    /**
     * This method attempts to converge node-admin w/agents to a {@link State}
     * with respect to: freeze, Orchestrator, and services running.
     */
    private void convergeState(State wantedState) {
        if (currentState == wantedState) {
            return;
        }
        boolean wantFrozen = wantedState != RESUMED;
        if (!nodeAdmin.setFrozen(wantFrozen)) {
            throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
        }

        switch (wantedState) {
            case RESUMED:
                orchestrator.resume(dockerHostHostName);
                break;
            case SUSPENDED_NODE_ADMIN:
                orchestrator.suspend(dockerHostHostName);
                break;
            case SUSPENDED:
                // Suspend the host together with all its active nodes, then stop their services.
                List<String> nodesInActiveState = getNodesInActiveState();
                List<String> nodesToSuspend = new ArrayList<>();
                nodesToSuspend.addAll(nodesInActiveState);
                nodesToSuspend.add(dockerHostHostName);
                orchestrator.suspend(dockerHostHostName, nodesToSuspend);
                nodeAdmin.stopNodeAgentServices(nodesInActiveState);
                break;
            default:
                throw new IllegalStateException("Unknown wanted state " + wantedState);
        }

        log.info("State changed from " + currentState + " to " + wantedState);
        synchronized (monitor) {
            currentState = wantedState;
        }
    }

    /** Fetches the containers to run from the node repository and hands them to node-admin. */
    private void fetchContainersToRunFromNodeRepository() {
        synchronized (monitor) {
            if (currentState != RESUMED) {
                log.info("Frozen, skipping fetching info from node repository");
                return;
            }
            final List<ContainerNodeSpec> containersToRun;
            try {
                containersToRun = nodeRepository.getContainersToRun(dockerHostHostName);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e);
                return;
            }
            if (containersToRun == null) {
                log.warning("Got null from node repository");
                return;
            }
            try {
                nodeAdmin.refreshContainersToRun(containersToRun);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed updating node admin: ", e);
            }
        }
    }

    /** Returns hostnames of this host's nodes that are in the active state. */
    private List<String> getNodesInActiveState() {
        try {
            return nodeRepository.getContainersToRun(dockerHostHostName)
                    .stream()
                    .filter(nodespec -> nodespec.nodeState == Node.State.active)
                    .map(nodespec -> nodespec.hostname)
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException("Failed to get nodes from node repo", e);
        }
    }

    /** Starts the tick loop thread and the divergence reporter. May only be called once. */
    public void start() {
        if (loopThread != null) {
            throw new RuntimeException("Can not restart NodeAdminStateUpdater");
        }
        loopThread = new Thread(() -> {
            while (! terminated.get()) tick();
        });
        loopThread.setName("tick-NodeAdminStateUpdater");
        loopThread.start();
        // Report hardware divergence every 60 minutes, first run after 5 minutes.
        specVerifierScheduler.scheduleWithFixedDelay(
                () -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);
    }
}
The method is no longer called deconstruct, so the "Deconstruct called"/"Deconstruct complete" log messages are stale and should be removed.
/**
 * Stops the converge loop: shuts down the spec-verifier scheduler, marks this updater
 * terminated, wakes the tick thread, and blocks until both the tick thread and the
 * scheduler have fully terminated before stopping the node admin.
 *
 * @throws RuntimeException if stop() has already been called
 */
public void stop() {
    specVerifierScheduler.shutdown();
    // Fail loudly on double-stop instead of silently joining an already-stopped thread.
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    do {
        try {
            // join() replaces join(0): both wait indefinitely, the explicit 0 only
            // obscured that. The stale "Deconstruct called/complete" log lines are
            // dropped — this method is stop(), not deconstruct().
            loopThread.join();
            specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e1) {
            log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown");
        }
    } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated());
    nodeAdmin.stop();
}
log.log(LogLevel.INFO, objectToString() + ": Deconstruct complete");
/**
 * Shuts down the spec-verifier scheduler and the tick loop, waits for both to
 * terminate completely, then stops the node admin.
 *
 * @throws RuntimeException if this updater was already stopped
 */
public void stop() {
    specVerifierScheduler.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    // Keep waiting until both the tick thread and the scheduler are fully done,
    // retrying the join/await if we are interrupted.
    while (true) {
        try {
            loopThread.join();
            specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException ignored) {
            log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown");
        }
        if (!loopThread.isAlive() && specVerifierScheduler.isTerminated()) {
            break;
        }
    }
    nodeAdmin.stop();
}
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final NodeAdmin nodeAdmin; private final Clock clock; private final Orchestrator orchestrator; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { log.log(LogLevel.INFO, objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted 
State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! 
terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final StorageMaintainer storageMaintainer; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.storageMaintainer = storageMaintainer; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String 
hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: 
freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new 
RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } }
Would it be possible to encapsulate these executor services in a class to hide away the stopping of them? They're otherwise polluting the stop functions of several classes.
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } log.log(LogLevel.INFO, objectToString() + ": Deconstruct called"); signalWorkToBeDone(); do { try { loopThread.join(0); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.log(LogLevel.INFO, objectToString() + ": Deconstruct complete"); }
specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final NodeAdmin nodeAdmin; private final Clock clock; private final Orchestrator orchestrator; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { log.log(LogLevel.INFO, objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted 
State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! 
terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final StorageMaintainer storageMaintainer; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.storageMaintainer = storageMaintainer; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String 
hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: 
freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new 
RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } }
why not lock.lock()?
public Lock lock(Class<?> key) { try { ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock(true)); lock.tryLock(Long.MAX_VALUE, TimeUnit.NANOSECONDS); return new Lock(lock); } catch (InterruptedException e) { throw new RuntimeException("Interrupted while waiting for lock of " + key); } }
lock.tryLock(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
public Lock lock(Class<?> key) { ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock(true)); lock.lock(); return new Lock(lock); }
class Locking { private final Map<Class<?>, ReentrantLock> locks = new ConcurrentHashMap<>(); /** * Locks class. This will block until the lock is acquired. * Users of this <b>must</b> close any lock acquired. * * @param key the key to lock * @return the acquired lock */ }
class Locking { private final Map<Class<?>, ReentrantLock> locks = new ConcurrentHashMap<>(); /** * Locks class. This will block until the lock is acquired. * Users of this <b>must</b> close any lock acquired. * * @param key the key to lock * @return the acquired lock */ }
Fixed.
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } log.log(LogLevel.INFO, objectToString() + ": Deconstruct called"); signalWorkToBeDone(); do { try { loopThread.join(0); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.log(LogLevel.INFO, objectToString() + ": Deconstruct complete"); }
log.log(LogLevel.INFO, objectToString() + ": Deconstruct called");
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final NodeAdmin nodeAdmin; private final Clock clock; private final Orchestrator orchestrator; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { log.log(LogLevel.INFO, objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted 
State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! 
terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final StorageMaintainer storageMaintainer; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.storageMaintainer = storageMaintainer; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String 
hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: 
freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new 
RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } }
Fixed.
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } log.log(LogLevel.INFO, objectToString() + ": Deconstruct called"); signalWorkToBeDone(); do { try { loopThread.join(0); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.log(LogLevel.INFO, objectToString() + ": Deconstruct complete"); }
loopThread.join(0);
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final NodeAdmin nodeAdmin; private final Clock clock; private final Orchestrator orchestrator; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { log.log(LogLevel.INFO, objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted 
State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! 
terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final StorageMaintainer storageMaintainer; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.storageMaintainer = storageMaintainer; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String 
hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: 
freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new 
RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } }
Fixed.
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } log.log(LogLevel.INFO, objectToString() + ": Deconstruct called"); signalWorkToBeDone(); do { try { loopThread.join(0); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.log(LogLevel.INFO, objectToString() + ": Deconstruct complete"); }
log.log(LogLevel.INFO, objectToString() + ": Deconstruct complete");
public void stop() { specVerifierScheduler.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private Thread loopThread; private final NodeRepository nodeRepository; private final NodeAdmin nodeAdmin; private final Clock clock; private final Orchestrator orchestrator; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval) { log.log(LogLevel.INFO, objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.lastTick = clock.instant(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted 
State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { if (loopThread != null) { throw new RuntimeException("Can not restart NodeAdminStateUpdater"); } loopThread = new Thread(() -> { while (! 
terminated.get()) tick(); }); loopThread.setName("tick-NodeAdminStateUpdater"); loopThread.start(); } }
/**
 * Periodically converges node-admin and its node agents towards a wanted {@link State}
 * (resume/suspend) against the Orchestrator, and keeps the set of containers to run in
 * sync with the node repository.
 *
 * Thread-safety: mutable state (currentState, wantedState, workToDoNow, lastTick) is
 * guarded by {@code monitor}; the tick loop runs on a dedicated thread started by
 * {@link #start()}.
 */
class NodeAdminStateUpdater {
    // Give up on an in-flight freeze after this long and force unfrozen ticks again.
    static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Guarded by monitor.
    private State currentState = SUSPENDED_NODE_ADMIN;
    private State wantedState = RESUMED;
    private boolean workToDoNow = true;

    private final Object monitor = new Object();
    private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());

    // Background scheduler used by start() to periodically report hardware divergence.
    private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(
            1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier"));

    // The tick-loop thread; non-null once start() has been called.
    private Thread loopThread;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final StorageMaintainer storageMaintainer;
    private final NodeAdmin nodeAdmin;
    private final Clock clock;
    private final String dockerHostHostName;
    private final Duration nodeAdminConvergeStateInterval;

    // Time of the last tick; used to pace the converge loop. Guarded by monitor.
    private Instant lastTick;

    public NodeAdminStateUpdater(
            NodeRepository nodeRepository,
            Orchestrator orchestrator,
            StorageMaintainer storageMaintainer,
            NodeAdmin nodeAdmin,
            String dockerHostHostName,
            Clock clock,
            Duration nodeAdminConvergeStateInterval) {
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.storageMaintainer = storageMaintainer;
        this.nodeAdmin = nodeAdmin;
        this.dockerHostHostName = dockerHostHostName;
        this.clock = clock;
        this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval;
        this.lastTick = clock.instant();
    }

    /** The states this updater can be asked to converge towards. */
    public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }

    /** Returns a consistent snapshot of internal state for the debug page. */
    public Map<String, Object> getDebugPage() {
        Map<String, Object> debug = new LinkedHashMap<>();
        synchronized (monitor) {
            debug.put("dockerHostHostName", dockerHostHostName);
            debug.put("NodeAdmin", nodeAdmin.debugInfo());
            debug.put("Wanted State: ", wantedState);
            debug.put("Current State: ", currentState);
        }
        return debug;
    }

    // Reports this host's hardware divergence to the node repository. Best effort
    // (failures only logged), and skipped unless currently resumed.
    private void updateHardwareDivergence(StorageMaintainer maintainer) {
        if (currentState != RESUMED) return;
        try {
            String hardwareDivergence = maintainer.getHardwareDivergence();
            NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence);
            nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to report hardware divergence", e);
        }
    }

    /**
     * Sets the wanted state and returns whether the current state already equals it.
     * A change of wanted state wakes the tick loop so convergence starts immediately.
     */
    public boolean setResumeStateAndCheckIfResumed(State wantedState) {
        synchronized (monitor) {
            if (this.wantedState != wantedState) {
                log.info("Wanted state change: " + this.wantedState + " -> " + wantedState);
                this.wantedState = wantedState;
                signalWorkToBeDone();
            }
            return currentState == wantedState;
        }
    }

    /** Wakes the tick loop if it is waiting out the converge interval. */
    void signalWorkToBeDone() {
        synchronized (monitor) {
            if (! workToDoNow) {
                workToDoNow = true;
                monitor.notifyAll();
            }
        }
    }

    // One iteration of the main loop: wait until the converge interval has elapsed
    // (or work is signalled), attempt to converge to the wanted state, then refresh
    // the containers to run.
    void tick() {
        State wantedStateCopy;
        synchronized (monitor) {
            while (! workToDoNow) {
                Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant());
                long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis();
                if (remainder > 0) {
                    try {
                        // Woken early by signalWorkToBeDone(), or times out.
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        log.info("Interrupted, but ignoring this: NodeAdminStateUpdater");
                    }
                } else break;
            }
            lastTick = clock.instant();
            workToDoNow = false;
            // Copy under the lock; convergeState() runs outside it.
            wantedStateCopy = this.wantedState;
        }

        try {
            convergeState(wantedStateCopy);
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected while converging; will be retried on the next tick.
            log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e);
        }

        if (wantedStateCopy != RESUMED && currentState == RESUMED) {
            // Still trying to freeze: if it has taken too long, unfreeze again so the
            // agents are not stuck half-frozen indefinitely.
            Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration();
            if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
                log.info("Timed out trying to freeze, will force unfreezed ticks");
                nodeAdmin.setFrozen(false);
            }
        }

        fetchContainersToRunFromNodeRepository();
    }

    /**
     * This method attempts to converge node-admin w/agents to a {@link State}
     * with respect to: freeze, Orchestrator, and services running.
     */
    private void convergeState(State wantedState) {
        if (currentState == wantedState) {
            return;
        }

        // (Un)freeze the agents first; bail out (and retry next tick) until done.
        boolean wantFrozen = wantedState != RESUMED;
        if (!nodeAdmin.setFrozen(wantFrozen)) {
            throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
        }

        switch (wantedState) {
            case RESUMED:
                orchestrator.resume(dockerHostHostName);
                break;
            case SUSPENDED_NODE_ADMIN:
                orchestrator.suspend(dockerHostHostName);
                break;
            case SUSPENDED:
                // Suspend the host together with all its active nodes, then stop
                // the node agent services for those nodes.
                List<String> nodesInActiveState = getNodesInActiveState();
                List<String> nodesToSuspend = new ArrayList<>();
                nodesToSuspend.addAll(nodesInActiveState);
                nodesToSuspend.add(dockerHostHostName);
                orchestrator.suspend(dockerHostHostName, nodesToSuspend);
                nodeAdmin.stopNodeAgentServices(nodesInActiveState);
                break;
            default:
                throw new IllegalStateException("Unknown wanted state " + wantedState);
        }

        log.info("State changed from " + currentState + " to " + wantedState);
        synchronized (monitor) {
            currentState = wantedState;
        }
    }

    // Fetches the containers this host should run from the node repository and hands
    // them to the node admin. No-op (logged) while not resumed.
    private void fetchContainersToRunFromNodeRepository() {
        synchronized (monitor) {
            if (currentState != RESUMED) {
                log.info("Frozen, skipping fetching info from node repository");
                return;
            }
            final List<ContainerNodeSpec> containersToRun;
            try {
                containersToRun = nodeRepository.getContainersToRun(dockerHostHostName);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e);
                return;
            }
            if (containersToRun == null) {
                log.warning("Got null from node repository");
                return;
            }
            try {
                nodeAdmin.refreshContainersToRun(containersToRun);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed updating node admin: ", e);
            }
        }
    }

    // Hostnames of this host's nodes that the node repository reports as 'active'.
    private List<String> getNodesInActiveState() {
        try {
            return nodeRepository.getContainersToRun(dockerHostHostName)
                    .stream()
                    .filter(nodespec -> nodespec.nodeState == Node.State.active)
                    .map(nodespec -> nodespec.hostname)
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException("Failed to get nodes from node repo", e);
        }
    }

    /** Starts the tick loop and the periodic hardware-divergence reporting. May only be called once. */
    public void start() {
        if (loopThread != null) {
            throw new RuntimeException("Can not restart NodeAdminStateUpdater");
        }
        loopThread = new Thread(() -> {
            while (! terminated.get()) tick();
        });
        loopThread.setName("tick-NodeAdminStateUpdater");
        loopThread.start();

        specVerifierScheduler.scheduleWithFixedDelay(
                () -> updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);
    }
}
Fixed.
/**
 * Locks the given class. This will block until the lock is acquired.
 * Users of this <b>must</b> close any lock acquired.
 *
 * @param key the key to lock
 * @return the acquired lock
 */
public Lock lock(Class<?> key) {
    // One fair lock per key class, created atomically on first use.
    ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock(true));
    // Plain lock() blocks uninterruptibly. The previous
    // tryLock(Long.MAX_VALUE, TimeUnit.NANOSECONDS) ignored the boolean result and
    // could throw InterruptedException, which was wrapped in a RuntimeException that
    // dropped the cause and did not restore the thread's interrupt status.
    lock.lock();
    return new Lock(lock);
}
lock.tryLock(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
/**
 * Acquires the lock associated with {@code key}, blocking until it is available.
 * Callers <b>must</b> close the returned lock when done.
 *
 * @param key the key to lock
 * @return the acquired lock
 */
public Lock lock(Class<?> key) {
    // Lazily create a fair per-class lock, then block until it is ours.
    final ReentrantLock classLock = locks.computeIfAbsent(key, ignored -> new ReentrantLock(true));
    classLock.lock();
    return new Lock(classLock);
}
/** Maintains one {@link ReentrantLock} per class, created lazily on first use. */
class Locking {
    // Per-class locks; ConcurrentHashMap makes lazy creation race-free.
    private final Map<Class<?>, ReentrantLock> locks = new ConcurrentHashMap<>();

    /**
     * Locks class. This will block until the lock is acquired.
     * Users of this <b>must</b> close any lock acquired.
     *
     * @param key the key to lock
     * @return the acquired lock
     */
}
/** Maintains one {@link ReentrantLock} per class, created lazily on first use. */
class Locking {
    // Per-class locks; ConcurrentHashMap makes lazy creation race-free.
    private final Map<Class<?>, ReentrantLock> locks = new ConcurrentHashMap<>();

    /**
     * Locks class. This will block until the lock is acquired.
     * Users of this <b>must</b> close any lock acquired.
     *
     * @param key the key to lock
     * @return the acquired lock
     */
}
Alternatively, this could be written as `if (table.isNativeTableOrMaterializedView() || table.isHiveTable())`.
/**
 * Whether refresh-by-partition is supported for the given base table type.
 *
 * @param table a base table of the materialized view
 * @return true if the table's partitions can be refreshed individually
 */
private boolean supportRefreshByPartition(Table table) {
    // Was table.isLocalTable(), which excluded OLAP materialized views used as base
    // tables; isOlapTableOrMaterializedView() covers both plain OLAP tables and MVs.
    return table.isOlapTableOrMaterializedView() || table.isHiveTable() || table.isCloudNativeTable();
}
if (table.isLocalTable() || table.isHiveTable() || table.isCloudNativeTable()) {
/**
 * Whether refresh-by-partition is supported for the given base table type:
 * OLAP tables/materialized views, Hive tables, and cloud-native tables.
 *
 * @param table a base table of the materialized view
 * @return true if the table's partitions can be refreshed individually
 */
private boolean supportRefreshByPartition(Table table) {
    return table.isOlapTableOrMaterializedView()
            || table.isHiveTable()
            || table.isCloudNativeTable();
}
class PartitionBasedMaterializedViewRefreshProcessor extends BaseTaskRunProcessor { private static final Logger LOG = LogManager.getLogger(PartitionBasedMaterializedViewRefreshProcessor.class); public static final String MV_ID = "mvId"; private static final int MAX_RETRY_NUM = 10; private Database database; private MaterializedView materializedView; private MvTaskRunContext mvContext; private Map<Long, Pair<BaseTableInfo, Table>> snapshotBaseTables; @VisibleForTesting public MvTaskRunContext getMvContext() { return mvContext; } @VisibleForTesting public void setMvContext(MvTaskRunContext mvContext) { this.mvContext = mvContext; } @Override public void processTaskRun(TaskRunContext context) throws Exception { prepare(context); InsertStmt insertStmt = null; ExecPlan execPlan = null; int retryNum = 0; boolean checked = false; while (!checked) { syncPartitions(); database.readLock(); try { refreshExternalTable(context); if (checkBaseTablePartitionChange()) { retryNum++; if (retryNum > MAX_RETRY_NUM) { throw new DmlException("materialized view:%s refresh task failed", materializedView.getName()); } LOG.info("materialized view:{} base partition has changed. 
retry to sync partitions, retryNum:{}", materializedView.getName(), retryNum); continue; } checked = true; Set<String> partitionsToRefresh = getPartitionsToRefreshForMaterializedView(context.getProperties()); if (partitionsToRefresh.isEmpty()) { LOG.info("no partitions to refresh for materialized view {}", materializedView.getName()); return; } filterPartitionByRefreshNumber(partitionsToRefresh, materializedView); LOG.debug("materialized view partitions to refresh:{}", partitionsToRefresh); Map<String, Set<String>> sourceTablePartitions = getSourceTablePartitions(partitionsToRefresh); LOG.debug("materialized view:{} source partitions :{}", materializedView.getName(), sourceTablePartitions); if (this.getMVTaskRunExtraMessage() != null) { MVTaskRunExtraMessage extraMessage = getMVTaskRunExtraMessage(); extraMessage.setMvPartitionsToRefresh(partitionsToRefresh); extraMessage.setBasePartitionsToRefreshMap(sourceTablePartitions); } insertStmt = generateInsertStmt(partitionsToRefresh, sourceTablePartitions); execPlan = generateRefreshPlan(mvContext.getCtx(), insertStmt); if (mvContext.getCtx().getSessionVariable().isEnableOptimizerTraceLog()) { StringBuffer sb = new StringBuffer(); sb.append(String.format("[TRACE QUERY] MV: %s\n", materializedView.getName())); sb.append(String.format("MV PartitionsToRefresh: %s \n", Joiner.on(",").join(partitionsToRefresh))); if (sourceTablePartitions != null) { sb.append(String.format("Base PartitionsToScan:%s\n", sourceTablePartitions)); } sb.append("Insert Plan:\n"); sb.append(execPlan.getExplainString(StatementBase.ExplainLevel.VERBOSE)); LOG.info(sb.toString()); } mvContext.setExecPlan(execPlan); } finally { database.readUnlock(); } } refreshMaterializedView(mvContext, execPlan, insertStmt); updateMeta(execPlan); if (mvContext.hasNextBatchPartition()) { generateNextTaskRun(); } } public MVTaskRunExtraMessage getMVTaskRunExtraMessage() { if (this.mvContext.status == null) { return null; } return 
this.mvContext.status.getMvTaskRunExtraMessage(); } @VisibleForTesting public void filterPartitionByRefreshNumber(Set<String> partitionsToRefresh, MaterializedView materializedView) { int partitionRefreshNumber = materializedView.getTableProperty().getPartitionRefreshNumber(); if (partitionRefreshNumber <= 0) { return; } Map<String, Range<PartitionKey>> rangePartitionMap = materializedView.getRangePartitionMap(); if (partitionRefreshNumber >= rangePartitionMap.size()) { return; } Map<String, Range<PartitionKey>> mappedPartitionsToRefresh = Maps.newHashMap(); for (String partitionName : partitionsToRefresh) { mappedPartitionsToRefresh.put(partitionName, rangePartitionMap.get(partitionName)); } LinkedHashMap<String, Range<PartitionKey>> sortedPartition = mappedPartitionsToRefresh.entrySet().stream() .sorted(Map.Entry.comparingByValue(RangeUtils.RANGE_COMPARATOR)) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -> e1, LinkedHashMap::new)); Iterator<String> partitionNameIter = sortedPartition.keySet().iterator(); for (int i = 0; i < partitionRefreshNumber; i++) { if (partitionNameIter.hasNext()) { partitionNameIter.next(); } } String nextPartitionStart = null; String endPartitionName = null; if (partitionNameIter.hasNext()) { String startPartitionName = partitionNameIter.next(); Range<PartitionKey> partitionKeyRange = mappedPartitionsToRefresh.get(startPartitionName); LiteralExpr lowerExpr = partitionKeyRange.lowerEndpoint().getKeys().get(0); nextPartitionStart = AnalyzerUtils.parseLiteralExprToDateString(lowerExpr, 0); endPartitionName = startPartitionName; partitionsToRefresh.remove(endPartitionName); } while (partitionNameIter.hasNext()) { endPartitionName = partitionNameIter.next(); partitionsToRefresh.remove(endPartitionName); } mvContext.setNextPartitionStart(nextPartitionStart); if (endPartitionName != null) { LiteralExpr upperExpr = mappedPartitionsToRefresh.get(endPartitionName).upperEndpoint().getKeys().get(0); 
mvContext.setNextPartitionEnd(AnalyzerUtils.parseLiteralExprToDateString(upperExpr, 1)); } else { mvContext.setNextPartitionEnd(null); } } private void generateNextTaskRun() { TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); Map<String, String> properties = mvContext.getProperties(); long mvId = Long.parseLong(properties.get(MV_ID)); String taskName = TaskBuilder.getMvTaskName(mvId); Map<String, String> newProperties = Maps.newHashMap(); for (Map.Entry<String, String> proEntry : properties.entrySet()) { if (proEntry.getValue() != null) { newProperties.put(proEntry.getKey(), proEntry.getValue()); } } newProperties.put(TaskRun.PARTITION_START, mvContext.getNextPartitionStart()); newProperties.put(TaskRun.PARTITION_END, mvContext.getNextPartitionEnd()); ExecuteOption option = new ExecuteOption(mvContext.getPriority(), false, newProperties); taskManager.executeTask(taskName, option); LOG.info("Submit a generate taskRun for task:{}, partitionStart:{}, partitionEnd:{}", mvId, mvContext.getNextPartitionStart(), mvContext.getNextPartitionEnd()); } private void refreshExternalTable(TaskRunContext context) { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { BaseTableInfo baseTableInfo = tablePair.first; Table table = tablePair.second; if (!table.isNativeTable()) { context.getCtx().getGlobalStateMgr().getMetadataMgr().refreshTable(baseTableInfo.getCatalogName(), baseTableInfo.getDbName(), table, Lists.newArrayList(), true); } } } private void updateMeta(ExecPlan execPlan) { if (!database.writeLockAndCheckExist()) { throw new DmlException("update meta failed. database:" + database.getFullName() + " not exist"); } try { Table mv = database.getTable(materializedView.getId()); if (mv == null) { throw new DmlException( "update meta failed. 
materialized view:" + materializedView.getName() + " not exist"); } MaterializedView.AsyncRefreshContext refreshContext = materializedView.getRefreshScheme().getAsyncRefreshContext(); updateMetaForOlapTable(refreshContext, execPlan); updateMetaForExternalTable(refreshContext, execPlan); } finally { database.writeUnlock(); } } private void updateMetaForOlapTable(MaterializedView.AsyncRefreshContext refreshContext, ExecPlan execPlan) { Map<Long, Map<String, MaterializedView.BasePartitionInfo>> currentVersionMap = refreshContext.getBaseTableVisibleVersionMap(); Map<Long, Map<String, MaterializedView.BasePartitionInfo>> changedTablePartitionInfos = getSourceTablePartitionInfos(execPlan); for (Map.Entry<Long, Map<String, MaterializedView.BasePartitionInfo>> tableEntry : changedTablePartitionInfos.entrySet()) { Long tableId = tableEntry.getKey(); if (!currentVersionMap.containsKey(tableId)) { currentVersionMap.put(tableId, Maps.newHashMap()); } Map<String, MaterializedView.BasePartitionInfo> currentTablePartitionInfo = currentVersionMap.get(tableId); Map<String, MaterializedView.BasePartitionInfo> partitionInfoMap = tableEntry.getValue(); currentTablePartitionInfo.putAll(partitionInfoMap); Table snapshotTable = snapshotBaseTables.get(tableId).second; if (snapshotTable.isOlapOrCloudNativeTable()) { OlapTable snapshotOlapTable = (OlapTable) snapshotTable; currentTablePartitionInfo.keySet().removeIf(partitionName -> !snapshotOlapTable.getPartitionNames().contains(partitionName)); } } if (!changedTablePartitionInfos.isEmpty()) { ChangeMaterializedViewRefreshSchemeLog changeRefreshSchemeLog = new ChangeMaterializedViewRefreshSchemeLog(materializedView); GlobalStateMgr.getCurrentState().getEditLog().logMvChangeRefreshScheme(changeRefreshSchemeLog); } } private void updateMetaForExternalTable(MaterializedView.AsyncRefreshContext refreshContext, ExecPlan execPlan) { Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> currentVersionMap = 
refreshContext.getBaseTableInfoVisibleVersionMap(); Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> changedTablePartitionInfos = getSourceTableInfoPartitionInfos(execPlan); for (Map.Entry<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> tableEntry : changedTablePartitionInfos.entrySet()) { BaseTableInfo baseTableInfo = tableEntry.getKey(); if (!currentVersionMap.containsKey(baseTableInfo)) { currentVersionMap.put(baseTableInfo, Maps.newHashMap()); } Map<String, MaterializedView.BasePartitionInfo> currentTablePartitionInfo = currentVersionMap.get(baseTableInfo); Map<String, MaterializedView.BasePartitionInfo> partitionInfoMap = tableEntry.getValue(); currentTablePartitionInfo.putAll(partitionInfoMap); Set<String> partitionNames = Sets.newHashSet(PartitionUtil.getPartitionNames(baseTableInfo.getTable())); currentTablePartitionInfo.keySet().removeIf(partitionName -> !partitionNames.contains(partitionName)); } if (!changedTablePartitionInfos.isEmpty()) { ChangeMaterializedViewRefreshSchemeLog changeRefreshSchemeLog = new ChangeMaterializedViewRefreshSchemeLog(materializedView); GlobalStateMgr.getCurrentState().getEditLog().logMvChangeRefreshScheme(changeRefreshSchemeLog); } } private void prepare(TaskRunContext context) { Map<String, String> properties = context.getProperties(); long mvId = Long.parseLong(properties.get(MV_ID)); database = GlobalStateMgr.getCurrentState().getDb(context.ctx.getDatabase()); if (database == null) { LOG.warn("database {} do not exist when refreshing materialized view:{}", context.ctx.getDatabase(), mvId); throw new DmlException("database " + context.ctx.getDatabase() + " do not exist."); } Table table = database.getTable(mvId); if (table == null) { LOG.warn("materialized view:{} in database:{} do not exist when refreshing", mvId, context.ctx.getDatabase()); throw new DmlException("database " + context.ctx.getDatabase() + " do not exist."); } materializedView = (MaterializedView) table; if 
(!materializedView.isActive()) { String errorMsg = String.format("Materialized view: %s, id: %d is not active, " + "skip sync partition and data with base tables", materializedView.getName(), mvId); LOG.warn(errorMsg); throw new DmlException(errorMsg); } mvContext = new MvTaskRunContext(context); } private void syncPartitions() { snapshotBaseTables = collectBaseTables(materializedView); PartitionInfo partitionInfo = materializedView.getPartitionInfo(); if (partitionInfo instanceof ExpressionRangePartitionInfo) { syncPartitionsForExpr(); } } private Pair<Table, Column> getPartitionTableAndColumn( Map<Long, Pair<BaseTableInfo, Table>> tables) { SlotRef slotRef = MaterializedView.getPartitionSlotRef(materializedView); for (Pair<BaseTableInfo, Table> tableInfo : tables.values()) { BaseTableInfo baseTableInfo = tableInfo.first; Table table = tableInfo.second; if (slotRef.getTblNameWithoutAnalyzed().getTbl().equals(baseTableInfo.getTableName())) { return Pair.create(table, table.getColumn(slotRef.getColumnName())); } } return Pair.create(null, null); } private void syncPartitionsForExpr() { Expr partitionExpr = MaterializedView.getPartitionExpr(materializedView); Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Table partitionBaseTable = partitionTableAndColumn.first; Preconditions.checkNotNull(partitionBaseTable); Column partitionColumn = partitionTableAndColumn.second; Preconditions.checkNotNull(partitionColumn); PartitionDiff partitionDiff = new PartitionDiff(); Map<String, Range<PartitionKey>> basePartitionMap; Map<String, Range<PartitionKey>> mvPartitionMap = materializedView.getRangePartitionMap(); database.readLock(); try { basePartitionMap = PartitionUtil.getPartitionRange(partitionBaseTable, partitionColumn); if (partitionExpr instanceof SlotRef) { partitionDiff = SyncPartitionUtils.calcSyncSamePartition(basePartitionMap, mvPartitionMap); } else if (partitionExpr instanceof FunctionCallExpr) { FunctionCallExpr 
functionCallExpr = (FunctionCallExpr) partitionExpr; String granularity = ((StringLiteral) functionCallExpr.getChild(0)).getValue().toLowerCase(); partitionDiff = SyncPartitionUtils.calcSyncRollupPartition(basePartitionMap, mvPartitionMap, granularity, partitionColumn.getPrimitiveType()); } } catch (UserException e) { LOG.warn("Materialized view compute partition difference with base table failed.", e); return; } finally { database.readUnlock(); } Map<String, Range<PartitionKey>> deletes = partitionDiff.getDeletes(); for (Map.Entry<String, Range<PartitionKey>> deleteEntry : deletes.entrySet()) { String mvPartitionName = deleteEntry.getKey(); dropPartition(database, materializedView, mvPartitionName); } LOG.info("The process of synchronizing materialized view [{}] delete partitions range [{}]", materializedView.getName(), deletes); Map<String, String> partitionProperties = getPartitionProperties(materializedView); DistributionDesc distributionDesc = getDistributionDesc(materializedView); Map<String, Range<PartitionKey>> adds = partitionDiff.getAdds(); for (Map.Entry<String, Range<PartitionKey>> addEntry : adds.entrySet()) { String mvPartitionName = addEntry.getKey(); addPartition(database, materializedView, mvPartitionName, addEntry.getValue(), partitionProperties, distributionDesc); mvPartitionMap.put(mvPartitionName, addEntry.getValue()); } LOG.info("The process of synchronizing materialized view [{}] add partitions range [{}]", materializedView.getName(), adds); Map<String, Set<String>> baseToMvNameRef = SyncPartitionUtils .generatePartitionRefMap(basePartitionMap, mvPartitionMap); Map<String, Set<String>> mvToBaseNameRef = SyncPartitionUtils .generatePartitionRefMap(mvPartitionMap, basePartitionMap); mvContext.setBaseToMvNameRef(baseToMvNameRef); mvContext.setMvToBaseNameRef(mvToBaseNameRef); mvContext.setBasePartitionMap(basePartitionMap); } private boolean needToRefreshTable(Table table) { return 
CollectionUtils.isNotEmpty(materializedView.getUpdatedPartitionNamesOfTable(table)); } private boolean needToRefreshNonPartitionTable(Table partitionTable) { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table snapshotTable = tablePair.second; if (snapshotTable.getId() == partitionTable.getId()) { continue; } if (!supportRefreshByPartition(snapshotTable)) { continue; } if (needToRefreshTable(snapshotTable)) { return true; } } return false; } private boolean unPartitionedMVNeedToRefresh() { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table snapshotTable = tablePair.second; if (!supportRefreshByPartition(snapshotTable)) { return true; } if (needToRefreshTable(snapshotTable)) { return true; } } return false; } @VisibleForTesting public Set<String> getPartitionsToRefreshForMaterializedView(Map<String, String> properties) throws AnalysisException { String start = properties.get(TaskRun.PARTITION_START); String end = properties.get(TaskRun.PARTITION_END); boolean force = Boolean.parseBoolean(properties.get(TaskRun.FORCE)); PartitionInfo partitionInfo = materializedView.getPartitionInfo(); Set<String> needRefreshMvPartitionNames = getPartitionsToRefreshForMaterializedView(partitionInfo, start, end, force); if (this.getMVTaskRunExtraMessage() != null) { MVTaskRunExtraMessage extraMessage = this.getMVTaskRunExtraMessage(); extraMessage.setForceRefresh(force); extraMessage.setPartitionStart(start); extraMessage.setPartitionEnd(end); } return needRefreshMvPartitionNames; } private Set<String> getPartitionsToRefreshForMaterializedView(PartitionInfo partitionInfo, String start, String end, boolean force) throws AnalysisException { if (force && start == null && end == null) { return Sets.newHashSet(materializedView.getPartitionNames()); } Set<String> needRefreshMvPartitionNames = Sets.newHashSet(); if (partitionInfo instanceof SinglePartitionInfo) { if (force || unPartitionedMVNeedToRefresh()) { return 
Sets.newHashSet(materializedView.getPartitionNames()); } } else if (partitionInfo instanceof ExpressionRangePartitionInfo) { Expr partitionExpr = MaterializedView.getPartitionExpr(materializedView); Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Table partitionTable = partitionTableAndColumn.first; Set<String> mvRangePartitionNames = SyncPartitionUtils.getPartitionNamesByRangeWithPartitionLimit( materializedView, start, end, mvContext.type); if (needToRefreshNonPartitionTable(partitionTable)) { if (start == null && end == null) { return Sets.newHashSet(materializedView.getPartitionNames()); } else { return getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(partitionTable, mvRangePartitionNames, true); } } if (partitionExpr instanceof SlotRef) { return getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(partitionTable, mvRangePartitionNames, force); } else if (partitionExpr instanceof FunctionCallExpr) { needRefreshMvPartitionNames = getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(partitionTable, mvRangePartitionNames, force); Set<String> baseChangedPartitionNames = getBasePartitionNamesByMVPartitionNames(needRefreshMvPartitionNames); LOG.debug("Start calcPotentialRefreshPartition, needRefreshMvPartitionNames: {}," + " baseChangedPartitionNames: {}", needRefreshMvPartitionNames, baseChangedPartitionNames); SyncPartitionUtils.calcPotentialRefreshPartition(needRefreshMvPartitionNames, baseChangedPartitionNames, mvContext.baseToMvNameRef, mvContext.mvToBaseNameRef); LOG.debug("Finish calcPotentialRefreshPartition, needRefreshMvPartitionNames: {}," + " baseChangedPartitionNames: {}", needRefreshMvPartitionNames, baseChangedPartitionNames); } } else { throw new DmlException("unsupported partition info type:" + partitionInfo.getClass().getName()); } return needRefreshMvPartitionNames; } private Set<String> getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(Table partitionTable, Set<String> 
mvRangePartitionNames, boolean force) { if (force || !supportRefreshByPartition(partitionTable)) { return Sets.newHashSet(mvRangePartitionNames); } Set<String> updatePartitionNames = materializedView.getUpdatedPartitionNamesOfTable(partitionTable); if (updatePartitionNames == null) { return mvRangePartitionNames; } Set<String> result = getMVPartitionNamesByBasePartitionNames(updatePartitionNames); result.retainAll(mvRangePartitionNames); return result; } private Set<String> getMVPartitionNamesByBasePartitionNames(Set<String> basePartitionNames) { Set<String> result = Sets.newHashSet(); for (String basePartitionName : basePartitionNames) { result.addAll(mvContext.baseToMvNameRef.get(basePartitionName)); } return result; } private Set<String> getBasePartitionNamesByMVPartitionNames(Set<String> mvPartitionNames) { Set<String> result = Sets.newHashSet(); for (String mvPartitionName : mvPartitionNames) { result.addAll(mvContext.mvToBaseNameRef.get(mvPartitionName)); } return result; } @VisibleForTesting public Map<String, Set<String>> getSourceTablePartitions(Set<String> affectedMaterializedViewPartitions) { Table partitionTable = null; if (materializedView.getPartitionInfo() instanceof ExpressionRangePartitionInfo) { Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); partitionTable = partitionTableAndColumn.first; } Map<String, Set<String>> tableNamePartitionNames = Maps.newHashMap(); for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table table = tablePair.second; if (partitionTable != null && partitionTable == table) { Set<String> needRefreshTablePartitionNames = Sets.newHashSet(); Map<String, Set<String>> mvToBaseNameRef = mvContext.getMvToBaseNameRef(); for (String mvPartitionName : affectedMaterializedViewPartitions) { needRefreshTablePartitionNames.addAll(mvToBaseNameRef.get(mvPartitionName)); } tableNamePartitionNames.put(table.getName(), needRefreshTablePartitionNames); } else { if 
(table.isNativeTable()) { tableNamePartitionNames.put(table.getName(), ((OlapTable) table).getPartitionNames()); } } } return tableNamePartitionNames; } private ExecPlan generateRefreshPlan(ConnectContext ctx, InsertStmt insertStmt) throws AnalysisException { return StatementPlanner.plan(insertStmt, ctx); } @VisibleForTesting public InsertStmt generateInsertStmt(Set<String> materializedViewPartitions, Map<String, Set<String>> sourceTablePartitions) { ConnectContext ctx = mvContext.getCtx(); ctx.getAuditEventBuilder().reset(); ctx.getAuditEventBuilder() .setTimestamp(System.currentTimeMillis()) .setClientIp(mvContext.getRemoteIp()) .setUser(ctx.getQualifiedUser()) .setDb(ctx.getDatabase()); ctx.getPlannerProfile().reset(); ctx.setThreadLocalInfo(); ctx.getSessionVariable().setEnableMaterializedViewRewrite(false); String definition = mvContext.getDefinition(); InsertStmt insertStmt = (InsertStmt) SqlParser.parse(definition, ctx.getSessionVariable()).get(0); insertStmt.setTargetPartitionNames(new PartitionNames(false, new ArrayList<>(materializedViewPartitions))); insertStmt.setSystem(true); Analyzer.analyze(insertStmt, ctx); QueryStatement queryStatement = insertStmt.getQueryStatement(); Map<String, TableRelation> tableRelations = AnalyzerUtils.collectAllTableRelation(queryStatement); for (Map.Entry<String, TableRelation> nameTableRelationEntry : tableRelations.entrySet()) { Set<String> tablePartitionNames = sourceTablePartitions.get(nameTableRelationEntry.getKey()); TableRelation tableRelation = nameTableRelationEntry.getValue(); tableRelation.setPartitionNames( new PartitionNames(false, tablePartitionNames == null ? 
null : new ArrayList<>(tablePartitionNames))); Table table = tableRelation.getTable(); if (tablePartitionNames != null && !table.isNativeTable()) { generatePartitionPredicate(tablePartitionNames, queryStatement, tableRelation); } } return insertStmt; } private void generatePartitionPredicate(Set<String> tablePartitionNames, QueryStatement queryStatement, TableRelation tableRelation) { List<Range<PartitionKey>> sourceTablePartitionRange = Lists.newArrayList(); for (String partitionName : tablePartitionNames) { sourceTablePartitionRange.add(mvContext.getBasePartitionMap().get(partitionName)); } sourceTablePartitionRange = MvUtils.mergeRanges(sourceTablePartitionRange); SlotRef partitionSlot = MaterializedView.getPartitionSlotRef(materializedView); List<String> columnOutputNames = queryStatement.getQueryRelation().getColumnOutputNames(); List<Expr> outputExpressions = queryStatement.getQueryRelation().getOutputExpression(); Expr outputPartitionSlot = null; for (int i = 0; i < outputExpressions.size(); ++i) { if (columnOutputNames.get(i).equalsIgnoreCase(partitionSlot.getColumnName())) { outputPartitionSlot = outputExpressions.get(i); break; } } if (outputPartitionSlot != null) { List<Expr> partitionPredicates = MvUtils.convertRange(outputPartitionSlot, sourceTablePartitionRange); Optional<Range<PartitionKey>> nullRange = sourceTablePartitionRange.stream(). 
filter(range -> range.lowerEndpoint().isMinValue()).findAny(); if (nullRange.isPresent()) { Expr isNullPredicate = new IsNullPredicate(outputPartitionSlot, false); partitionPredicates.add(isNullPredicate); } tableRelation.setPartitionPredicate(Expr.compoundOr(partitionPredicates)); } } private boolean checkBaseTablePartitionChange() { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { BaseTableInfo baseTableInfo = tablePair.first; Table snapshotTable = tablePair.second; Database db = baseTableInfo.getDb(); if (db == null) { return true; } db.readLock(); try { Table table = baseTableInfo.getTable(); if (table == null) { return true; } if (snapshotTable.isOlapOrCloudNativeTable()) { OlapTable snapShotOlapTable = (OlapTable) snapshotTable; if (snapShotOlapTable.getPartitionInfo() instanceof SinglePartitionInfo) { Set<String> partitionNames = ((OlapTable) table).getPartitionNames(); if (!snapShotOlapTable.getPartitionNames().equals(partitionNames)) { return true; } } else { Map<String, Range<PartitionKey>> snapshotPartitionMap = snapShotOlapTable.getRangePartitionMap(); Map<String, Range<PartitionKey>> currentPartitionMap = ((OlapTable) table).getRangePartitionMap(); boolean changed = SyncPartitionUtils.hasPartitionChange(snapshotPartitionMap, currentPartitionMap); if (changed) { return true; } } } else if (snapshotTable.isHiveTable() || snapshotTable.isHudiTable()) { HiveMetaStoreTable snapShotHMSTable = (HiveMetaStoreTable) snapshotTable; if (snapShotHMSTable.isUnPartitioned()) { if (!((HiveMetaStoreTable) table).isUnPartitioned()) { return true; } } else { PartitionInfo mvPartitionInfo = materializedView.getPartitionInfo(); if (!(mvPartitionInfo instanceof ExpressionRangePartitionInfo)) { return false; } Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Column partitionColumn = partitionTableAndColumn.second; if (!snapshotTable.containColumn(partitionColumn.getName())) { continue; } Map<String, 
Range<PartitionKey>> snapshotPartitionMap = PartitionUtil. getPartitionRange(snapshotTable, partitionColumn); Map<String, Range<PartitionKey>> currentPartitionMap = PartitionUtil. getPartitionRange(table, partitionColumn); boolean changed = SyncPartitionUtils.hasPartitionChange(snapshotPartitionMap, currentPartitionMap); if (changed) { return true; } } } else if (snapshotTable.isIcebergTable()) { IcebergTable snapShotIcebergTable = (IcebergTable) snapshotTable; if (snapShotIcebergTable.isUnPartitioned()) { if (!table.isUnPartitioned()) { return true; } } else { PartitionInfo mvPartitionInfo = materializedView.getPartitionInfo(); if (!(mvPartitionInfo instanceof ExpressionRangePartitionInfo)) { return false; } Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Column partitionColumn = partitionTableAndColumn.second; if (!snapShotIcebergTable.containColumn(partitionColumn.getName())) { continue; } Map<String, Range<PartitionKey>> snapshotPartitionMap = PartitionUtil. getPartitionRange(snapshotTable, partitionColumn); Map<String, Range<PartitionKey>> currentPartitionMap = PartitionUtil.
// Any UserException during range computation is treated as "changed" (forces a re-sync).
getPartitionRange(table, partitionColumn); boolean changed = SyncPartitionUtils.hasPartitionChange(snapshotPartitionMap, currentPartitionMap); if (changed) { return true; } } } } catch (UserException e) { LOG.warn("Materialized view compute partition change failed", e); return true; } finally { db.readUnlock(); } } return false; }
// Collects, per OLAP base-table id, the partitions (name -> partition id + visible version)
// actually selected by the refresh exec plan's OlapScanNodes.
private Map<Long, Map<String, MaterializedView.BasePartitionInfo>> getSourceTablePartitionInfos(ExecPlan execPlan) { Map<Long, Map<String, MaterializedView.BasePartitionInfo>> selectedBasePartitionInfos = Maps.newHashMap(); List<ScanNode> scanNodes = execPlan.getScanNodes(); for (ScanNode scanNode : scanNodes) { if (scanNode instanceof OlapScanNode) { OlapScanNode olapScanNode = (OlapScanNode) scanNode; Map<String, MaterializedView.BasePartitionInfo> selectedPartitionIdVersions = getSelectedPartitionInfos(olapScanNode); OlapTable olapTable = olapScanNode.getOlapTable(); selectedBasePartitionInfos.put(olapTable.getId(), selectedPartitionIdVersions); } } return selectedBasePartitionInfos; }
// Same as getSourceTablePartitionInfos but for external (HDFS) scan nodes, keyed by the MV's
// BaseTableInfo; scan nodes whose table is not among the MV's base tables are skipped.
private Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> getSourceTableInfoPartitionInfos( ExecPlan execPlan) { Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> selectedBasePartitionInfos = Maps.newHashMap(); List<ScanNode> scanNodes = execPlan.getScanNodes(); for (ScanNode scanNode : scanNodes) { if (scanNode instanceof HdfsScanNode) { HdfsScanNode hdfsScanNode = (HdfsScanNode) scanNode; Table hiveTable = hdfsScanNode.getHiveTable(); Optional<BaseTableInfo> baseTableInfoOptional = materializedView.getBaseTableInfos().stream().filter( baseTableInfo -> baseTableInfo.getTableIdentifier().equals(hiveTable.getTableIdentifier())).
findAny(); if (!baseTableInfoOptional.isPresent()) { continue; } Map<String, MaterializedView.BasePartitionInfo> selectedPartitionIdVersions = getSelectedPartitionInfos(hdfsScanNode, baseTableInfoOptional.get()); selectedBasePartitionInfos.put(baseTableInfoOptional.get(), selectedPartitionIdVersions); } } return selectedBasePartitionInfos; }
// Executes the refresh INSERT plan through StmtExecutor; always unregisters the query and
// records audit info in the finally block, even on failure.
@VisibleForTesting public void refreshMaterializedView(MvTaskRunContext mvContext, ExecPlan execPlan, InsertStmt insertStmt) throws Exception { Preconditions.checkNotNull(execPlan); Preconditions.checkNotNull(insertStmt); ConnectContext ctx = mvContext.getCtx(); StmtExecutor executor = new StmtExecutor(ctx, insertStmt); ctx.setExecutor(executor); ctx.setStmtId(new AtomicInteger().incrementAndGet()); ctx.setExecutionId(UUIDUtil.toTUniqueId(ctx.getQueryId())); try { executor.handleDMLStmt(execPlan, insertStmt); } finally { QeProcessorImpl.INSTANCE.unregisterQuery(ctx.getExecutionId()); auditAfterExec(mvContext, executor.getParsedStmt(), executor.getQueryStatisticsForAuditLog()); } }
// Snapshots all of the MV's base tables under each table's db read lock. OLAP and lake tables
// are deep-copied so later change detection compares against a stable snapshot; other table
// kinds are stored by reference. Throws DmlException if a db or table no longer exists.
@VisibleForTesting public Map<Long, Pair<BaseTableInfo, Table>> collectBaseTables(MaterializedView materializedView) { Map<Long, Pair<BaseTableInfo, Table>> tables = Maps.newHashMap(); List<BaseTableInfo> baseTableInfos = materializedView.getBaseTableInfos(); for (BaseTableInfo baseTableInfo : baseTableInfos) { Database db = baseTableInfo.getDb(); if (db == null) { LOG.warn("database {} do not exist when refreshing materialized view:{}", baseTableInfo.getDbInfoStr(), materializedView.getName()); throw new DmlException("database " + baseTableInfo.getDbInfoStr() + " do not exist."); } Table table = baseTableInfo.getTable(); if (table == null) { LOG.warn("table {} do not exist when refreshing materialized view:{}", baseTableInfo.getTableInfoStr(), materializedView.getName()); throw new DmlException("Materialized view base table: %s not exist.", baseTableInfo.getTableInfoStr()); } db.readLock(); try { if (table.isOlapTable()) { Table copied = new
OlapTable(); if (!DeepCopy.copy(table, copied, OlapTable.class)) { throw new DmlException("Failed to copy olap table: %s", table.getName()); } tables.put(table.getId(), Pair.create(baseTableInfo, copied)); } else if (table.isCloudNativeTable()) { LakeTable copied = DeepCopy.copyWithGson(table, LakeTable.class); if (copied == null) { throw new DmlException("Failed to copy lake table: %s", table.getName()); } tables.put(table.getId(), Pair.create(baseTableInfo, copied)); } else { tables.put(table.getId(), Pair.create(baseTableInfo, table)); } } finally { db.readUnlock(); } } return tables; }
// Builds the property map (replication_num, storage_medium, and optionally
// storage_cooldown_time when it is set and not the MAX sentinel) used when creating new
// MV partitions.
private Map<String, String> getPartitionProperties(MaterializedView materializedView) { Map<String, String> partitionProperties = new HashMap<>(4); partitionProperties.put("replication_num", String.valueOf(materializedView.getDefaultReplicationNum())); partitionProperties.put("storage_medium", materializedView.getStorageMedium()); String storageCooldownTime = materializedView.getTableProperty().getProperties().get("storage_cooldown_time"); if (storageCooldownTime != null && !storageCooldownTime.equals(String.valueOf(DataProperty.MAX_COOLDOWN_TIME_MS))) { String storageCooldownTimeStr = TimeUtils.longToTimeString(Long.parseLong(storageCooldownTime)); partitionProperties.put("storage_cooldown_time", storageCooldownTimeStr); } return partitionProperties; }
// Rebuilds a hash-distribution descriptor (bucket count + distribution column names) from
// the MV's default distribution info.
private DistributionDesc getDistributionDesc(MaterializedView materializedView) { HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) materializedView.getDefaultDistributionInfo(); List<String> distColumnNames = new ArrayList<>(); for (Column distributionColumn : hashDistributionInfo.getDistributionColumns()) { distColumnNames.add(distributionColumn.getName()); } return new HashDistributionDesc(hashDistributionInfo.getBucketNum(), distColumnNames); }
// Adds one range partition [lower, upper) to the MV; the upper bound becomes MAXVALUE when
// the range's upper endpoint is the max partition key. Wraps any failure in DmlException.
private void addPartition(Database database, MaterializedView materializedView, String partitionName, Range<PartitionKey> partitionKeyRange, Map<String, String>
partitionProperties, DistributionDesc distributionDesc) { String lowerBound = partitionKeyRange.lowerEndpoint().getKeys().get(0).getStringValue(); String upperBound = partitionKeyRange.upperEndpoint().getKeys().get(0).getStringValue(); boolean isMaxValue = partitionKeyRange.upperEndpoint().isMaxValue(); PartitionValue upperPartitionValue; if (isMaxValue) { upperPartitionValue = PartitionValue.MAX_VALUE; } else { upperPartitionValue = new PartitionValue(upperBound); } PartitionKeyDesc partitionKeyDesc = new PartitionKeyDesc( Collections.singletonList(new PartitionValue(lowerBound)), Collections.singletonList(upperPartitionValue)); SingleRangePartitionDesc singleRangePartitionDesc = new SingleRangePartitionDesc(false, partitionName, partitionKeyDesc, partitionProperties); try { GlobalStateMgr.getCurrentState().addPartitions( database, materializedView.getName(), new AddPartitionClause(singleRangePartitionDesc, distributionDesc, partitionProperties, false)); } catch (Exception e) { throw new DmlException("Expression add partition failed: %s, db: %s, table: %s", e, e.getMessage(), database.getFullName(), materializedView.getName()); } }
// Drops one MV partition under the db write lock (forced drop, last clause arg = true).
// NOTE(review): the catch below rethrows with the "Expression add partition failed" message
// copied from addPartition — looks like a copy-paste; confirm the intended message.
private void dropPartition(Database database, MaterializedView materializedView, String mvPartitionName) { String dropPartitionName = materializedView.getPartition(mvPartitionName).getName(); if (!database.writeLockAndCheckExist()) { throw new DmlException("drop partition failed. database:" + database.getFullName() + " not exist"); } try { Table mv = database.getTable(materializedView.getId()); if (mv == null) { throw new DmlException("drop partition failed. mv:" + materializedView.getName() + " not exist"); } Partition mvPartition = mv.getPartition(dropPartitionName); if (mvPartition == null) { throw new DmlException("drop partition failed.
partition:" + dropPartitionName + " not exist"); } GlobalStateMgr.getCurrentState().dropPartition( database, materializedView, new DropPartitionClause(false, dropPartitionName, false, true)); } catch (Exception e) { throw new DmlException("Expression add partition failed: %s, db: %s, table: %s", e, e.getMessage(), database.getFullName(), materializedView.getName()); } finally { database.writeUnlock(); } }
// Zips the scan node's parallel id/name/version collections into name -> BasePartitionInfo.
// Assumes the three collections iterate in the same corresponding order — TODO confirm
// against OlapScanNode's contract.
private Map<String, MaterializedView.BasePartitionInfo> getSelectedPartitionInfos(OlapScanNode olapScanNode) { Map<String, MaterializedView.BasePartitionInfo> partitionInfos = Maps.newHashMap(); Collection<Long> selectedPartitionIds = olapScanNode.getSelectedPartitionIds(); Collection<String> selectedPartitionNames = olapScanNode.getSelectedPartitionNames(); Collection<Long> selectedPartitionVersions = olapScanNode.getSelectedPartitionVersions(); Iterator<Long> selectPartitionIdIterator = selectedPartitionIds.iterator(); Iterator<String> selectPartitionNameIterator = selectedPartitionNames.iterator(); Iterator<Long> selectPartitionVersionIterator = selectedPartitionVersions.iterator(); while (selectPartitionIdIterator.hasNext()) { long partitionId = selectPartitionIdIterator.next(); String partitionName = selectPartitionNameIterator.next(); long partitionVersion = selectPartitionVersionIterator.next(); partitionInfos.put(partitionName, new MaterializedView.BasePartitionInfo(partitionId, partitionVersion)); } return partitionInfos; }
// Maps the selected Hive partition names (or the table name itself for an unpartitioned
// table) to BasePartitionInfo entries whose version is the partition's modification time;
// the partition id is fixed at -1 since external partitions have no StarRocks id.
private Map<String, MaterializedView.BasePartitionInfo> getSelectedPartitionInfos(HdfsScanNode hdfsScanNode, BaseTableInfo baseTableInfo) { Map<String, MaterializedView.BasePartitionInfo> partitionInfos = Maps.newHashMap(); HiveTable hiveTable = hdfsScanNode.getHiveTable(); List<String> partitionColumnNames = hiveTable.getPartitionColumnNames(); List<String> selectedPartitionNames; if (hiveTable.isUnPartitioned()) { selectedPartitionNames = Lists.newArrayList(hiveTable.getTableName()); } else { Collection<Long> selectedPartitionIds =
hdfsScanNode.getScanNodePredicates().getSelectedPartitionIds(); List<PartitionKey> selectedPartitionKey = Lists.newArrayList(); for (Long selectedPartitionId : selectedPartitionIds) { selectedPartitionKey .add(hdfsScanNode.getScanNodePredicates().getIdToPartitionKey().get(selectedPartitionId)); } selectedPartitionNames = selectedPartitionKey.stream().map(partitionKey -> PartitionUtil.toHivePartitionName(partitionColumnNames, partitionKey)).collect(Collectors.toList()); } List<com.starrocks.connector.PartitionInfo> hivePartitions = GlobalStateMgr. getCurrentState().getMetadataMgr().getPartitions(baseTableInfo.getCatalogName(), hiveTable, selectedPartitionNames); for (int index = 0; index < selectedPartitionNames.size(); ++index) { partitionInfos.put(selectedPartitionNames.get(index), new MaterializedView.BasePartitionInfo(-1, hivePartitions.get(index).getModifiedTime())); } return partitionInfos; } }
// Task-run processor that refreshes a partitioned materialized view: snapshots its base
// tables, aligns the MV's partitions with theirs, computes the MV partitions that need
// refreshing, and rebuilds them via an INSERT plan.
// NOTE(review): this chunk appears to contain two copies of this class (another copy ends
// just above this declaration) — likely an extraction/merge artifact; verify in the repo.
class PartitionBasedMaterializedViewRefreshProcessor extends BaseTaskRunProcessor { private static final Logger LOG = LogManager.getLogger(PartitionBasedMaterializedViewRefreshProcessor.class); public static final String MV_ID = "mvId"; private static final int MAX_RETRY_NUM = 10; private Database database; private MaterializedView materializedView; private MvTaskRunContext mvContext; private Map<Long, Pair<BaseTableInfo, Table>> snapshotBaseTables; @VisibleForTesting public MvTaskRunContext getMvContext() { return mvContext; } @VisibleForTesting public void setMvContext(MvTaskRunContext mvContext) { this.mvContext = mvContext; }
// Main entry point. Retries partition sync up to MAX_RETRY_NUM times while base-table
// partitions keep changing underneath us; once stable, plans the refresh insert under the
// db read lock, executes it outside the lock, updates version metadata, and (when the
// refresh was capped to a partition batch) schedules the follow-up task run.
@Override public void processTaskRun(TaskRunContext context) throws Exception { prepare(context); InsertStmt insertStmt = null; ExecPlan execPlan = null; int retryNum = 0; boolean checked = false; while (!checked) { syncPartitions(); database.readLock(); try { refreshExternalTable(context); if (checkBaseTablePartitionChange()) { retryNum++; if (retryNum > MAX_RETRY_NUM) { throw new DmlException("materialized view:%s refresh task failed", materializedView.getName()); } LOG.info("materialized view:{} base partition has changed.
retry to sync partitions, retryNum:{}", materializedView.getName(), retryNum); continue; } checked = true; Set<String> partitionsToRefresh = getPartitionsToRefreshForMaterializedView(context.getProperties()); if (partitionsToRefresh.isEmpty()) { LOG.info("no partitions to refresh for materialized view {}", materializedView.getName()); return; } filterPartitionByRefreshNumber(partitionsToRefresh, materializedView); LOG.debug("materialized view partitions to refresh:{}", partitionsToRefresh); Map<String, Set<String>> sourceTablePartitions = getSourceTablePartitions(partitionsToRefresh); LOG.debug("materialized view:{} source partitions :{}", materializedView.getName(), sourceTablePartitions); if (this.getMVTaskRunExtraMessage() != null) { MVTaskRunExtraMessage extraMessage = getMVTaskRunExtraMessage(); extraMessage.setMvPartitionsToRefresh(partitionsToRefresh); extraMessage.setBasePartitionsToRefreshMap(sourceTablePartitions); } insertStmt = generateInsertStmt(partitionsToRefresh, sourceTablePartitions); execPlan = generateRefreshPlan(mvContext.getCtx(), insertStmt); if (mvContext.getCtx().getSessionVariable().isEnableOptimizerTraceLog()) { StringBuffer sb = new StringBuffer(); sb.append(String.format("[TRACE QUERY] MV: %s\n", materializedView.getName())); sb.append(String.format("MV PartitionsToRefresh: %s \n", Joiner.on(",").join(partitionsToRefresh))); if (sourceTablePartitions != null) { sb.append(String.format("Base PartitionsToScan:%s\n", sourceTablePartitions)); } sb.append("Insert Plan:\n"); sb.append(execPlan.getExplainString(StatementBase.ExplainLevel.VERBOSE)); LOG.info(sb.toString()); } mvContext.setExecPlan(execPlan); } finally { database.readUnlock(); } } refreshMaterializedView(mvContext, execPlan, insertStmt); updateMeta(execPlan); if (mvContext.hasNextBatchPartition()) { generateNextTaskRun(); } }
// Returns the task-run status extra-message holder, or null when no status is attached.
public MVTaskRunExtraMessage getMVTaskRunExtraMessage() { if (this.mvContext.status == null) { return null; } return
this.mvContext.status.getMvTaskRunExtraMessage(); }
// Caps a single task run to at most partition_refresh_number MV partitions, keeping the
// partitions with the lowest key ranges first. The remaining partitions' [start, end)
// window is stashed in mvContext so generateNextTaskRun() can continue where this run
// left off. No-op when the limit is unset (<= 0) or already covers all partitions.
@VisibleForTesting public void filterPartitionByRefreshNumber(Set<String> partitionsToRefresh, MaterializedView materializedView) { int partitionRefreshNumber = materializedView.getTableProperty().getPartitionRefreshNumber(); if (partitionRefreshNumber <= 0) { return; } Map<String, Range<PartitionKey>> rangePartitionMap = materializedView.getRangePartitionMap(); if (partitionRefreshNumber >= rangePartitionMap.size()) { return; } Map<String, Range<PartitionKey>> mappedPartitionsToRefresh = Maps.newHashMap(); for (String partitionName : partitionsToRefresh) { mappedPartitionsToRefresh.put(partitionName, rangePartitionMap.get(partitionName)); } LinkedHashMap<String, Range<PartitionKey>> sortedPartition = mappedPartitionsToRefresh.entrySet().stream() .sorted(Map.Entry.comparingByValue(RangeUtils.RANGE_COMPARATOR)) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -> e1, LinkedHashMap::new)); Iterator<String> partitionNameIter = sortedPartition.keySet().iterator(); for (int i = 0; i < partitionRefreshNumber; i++) { if (partitionNameIter.hasNext()) { partitionNameIter.next(); } } String nextPartitionStart = null; String endPartitionName = null; if (partitionNameIter.hasNext()) { String startPartitionName = partitionNameIter.next(); Range<PartitionKey> partitionKeyRange = mappedPartitionsToRefresh.get(startPartitionName); LiteralExpr lowerExpr = partitionKeyRange.lowerEndpoint().getKeys().get(0); nextPartitionStart = AnalyzerUtils.parseLiteralExprToDateString(lowerExpr, 0); endPartitionName = startPartitionName; partitionsToRefresh.remove(endPartitionName); } while (partitionNameIter.hasNext()) { endPartitionName = partitionNameIter.next(); partitionsToRefresh.remove(endPartitionName); } mvContext.setNextPartitionStart(nextPartitionStart); if (endPartitionName != null) { LiteralExpr upperExpr = mappedPartitionsToRefresh.get(endPartitionName).upperEndpoint().getKeys().get(0);
mvContext.setNextPartitionEnd(AnalyzerUtils.parseLiteralExprToDateString(upperExpr, 1)); } else { mvContext.setNextPartitionEnd(null); } }
// Submits a follow-up task run for the same MV carrying the next partition window
// (PARTITION_START / PARTITION_END) computed by filterPartitionByRefreshNumber.
private void generateNextTaskRun() { TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); Map<String, String> properties = mvContext.getProperties(); long mvId = Long.parseLong(properties.get(MV_ID)); String taskName = TaskBuilder.getMvTaskName(mvId); Map<String, String> newProperties = Maps.newHashMap(); for (Map.Entry<String, String> proEntry : properties.entrySet()) { if (proEntry.getValue() != null) { newProperties.put(proEntry.getKey(), proEntry.getValue()); } } newProperties.put(TaskRun.PARTITION_START, mvContext.getNextPartitionStart()); newProperties.put(TaskRun.PARTITION_END, mvContext.getNextPartitionEnd()); ExecuteOption option = new ExecuteOption(mvContext.getPriority(), false, newProperties); taskManager.executeTask(taskName, option); LOG.info("Submit a generate taskRun for task:{}, partitionStart:{}, partitionEnd:{}", mvId, mvContext.getNextPartitionStart(), mvContext.getNextPartitionEnd()); }
// Forces a metadata refresh for every non-native base table so that subsequent partition
// comparisons see current external-catalog state.
private void refreshExternalTable(TaskRunContext context) { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { BaseTableInfo baseTableInfo = tablePair.first; Table table = tablePair.second; if (!table.isNativeTableOrMaterializedView()) { context.getCtx().getGlobalStateMgr().getMetadataMgr().refreshTable(baseTableInfo.getCatalogName(), baseTableInfo.getDbName(), table, Lists.newArrayList(), true); } } }
// Under the db write lock, records the base-table partition versions that the refresh
// actually read into the MV's async refresh context (OLAP and external tables separately).
private void updateMeta(ExecPlan execPlan) { if (!database.writeLockAndCheckExist()) { throw new DmlException("update meta failed. database:" + database.getFullName() + " not exist"); } try { Table mv = database.getTable(materializedView.getId()); if (mv == null) { throw new DmlException( "update meta failed.
materialized view:" + materializedView.getName() + " not exist"); } MaterializedView.AsyncRefreshContext refreshContext = materializedView.getRefreshScheme().getAsyncRefreshContext(); updateMetaForOlapTable(refreshContext, execPlan); updateMetaForExternalTable(refreshContext, execPlan); } finally { database.writeUnlock(); } }
// Merges the scanned OLAP partitions' versions into the refresh context (keyed by table id),
// drops entries for partitions that no longer exist on the snapshot, and writes an edit-log
// record so the change is replayed on followers.
private void updateMetaForOlapTable(MaterializedView.AsyncRefreshContext refreshContext, ExecPlan execPlan) { Map<Long, Map<String, MaterializedView.BasePartitionInfo>> currentVersionMap = refreshContext.getBaseTableVisibleVersionMap(); Map<Long, Map<String, MaterializedView.BasePartitionInfo>> changedTablePartitionInfos = getSourceTablePartitionInfos(execPlan); for (Map.Entry<Long, Map<String, MaterializedView.BasePartitionInfo>> tableEntry : changedTablePartitionInfos.entrySet()) { Long tableId = tableEntry.getKey(); if (!currentVersionMap.containsKey(tableId)) { currentVersionMap.put(tableId, Maps.newHashMap()); } Map<String, MaterializedView.BasePartitionInfo> currentTablePartitionInfo = currentVersionMap.get(tableId); Map<String, MaterializedView.BasePartitionInfo> partitionInfoMap = tableEntry.getValue(); currentTablePartitionInfo.putAll(partitionInfoMap); Table snapshotTable = snapshotBaseTables.get(tableId).second; if (snapshotTable.isOlapOrCloudNativeTable()) { OlapTable snapshotOlapTable = (OlapTable) snapshotTable; currentTablePartitionInfo.keySet().removeIf(partitionName -> !snapshotOlapTable.getPartitionNames().contains(partitionName)); } } if (!changedTablePartitionInfos.isEmpty()) { ChangeMaterializedViewRefreshSchemeLog changeRefreshSchemeLog = new ChangeMaterializedViewRefreshSchemeLog(materializedView); GlobalStateMgr.getCurrentState().getEditLog().logMvChangeRefreshScheme(changeRefreshSchemeLog); } }
// External-table counterpart of updateMetaForOlapTable: bookkeeping is keyed by
// BaseTableInfo and stale entries are pruned against the table's current partition names.
private void updateMetaForExternalTable(MaterializedView.AsyncRefreshContext refreshContext, ExecPlan execPlan) { Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> currentVersionMap =
refreshContext.getBaseTableInfoVisibleVersionMap(); Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> changedTablePartitionInfos = getSourceTableInfoPartitionInfos(execPlan); for (Map.Entry<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> tableEntry : changedTablePartitionInfos.entrySet()) { BaseTableInfo baseTableInfo = tableEntry.getKey(); if (!currentVersionMap.containsKey(baseTableInfo)) { currentVersionMap.put(baseTableInfo, Maps.newHashMap()); } Map<String, MaterializedView.BasePartitionInfo> currentTablePartitionInfo = currentVersionMap.get(baseTableInfo); Map<String, MaterializedView.BasePartitionInfo> partitionInfoMap = tableEntry.getValue(); currentTablePartitionInfo.putAll(partitionInfoMap); Set<String> partitionNames = Sets.newHashSet(PartitionUtil.getPartitionNames(baseTableInfo.getTable())); currentTablePartitionInfo.keySet().removeIf(partitionName -> !partitionNames.contains(partitionName)); } if (!changedTablePartitionInfos.isEmpty()) { ChangeMaterializedViewRefreshSchemeLog changeRefreshSchemeLog = new ChangeMaterializedViewRefreshSchemeLog(materializedView); GlobalStateMgr.getCurrentState().getEditLog().logMvChangeRefreshScheme(changeRefreshSchemeLog); } }
// Resolves the database and MV from the task properties and fails fast (DmlException) when
// either is missing or the MV is inactive; also creates the per-run MvTaskRunContext.
// NOTE(review): the "table == null" branch reuses the "database ... do not exist." message —
// likely a copy-paste; confirm the intended wording.
private void prepare(TaskRunContext context) { Map<String, String> properties = context.getProperties(); long mvId = Long.parseLong(properties.get(MV_ID)); database = GlobalStateMgr.getCurrentState().getDb(context.ctx.getDatabase()); if (database == null) { LOG.warn("database {} do not exist when refreshing materialized view:{}", context.ctx.getDatabase(), mvId); throw new DmlException("database " + context.ctx.getDatabase() + " do not exist."); } Table table = database.getTable(mvId); if (table == null) { LOG.warn("materialized view:{} in database:{} do not exist when refreshing", mvId, context.ctx.getDatabase()); throw new DmlException("database " + context.ctx.getDatabase() + " do not exist."); } materializedView = (MaterializedView) table; if
(!materializedView.isActive()) { String errorMsg = String.format("Materialized view: %s, id: %d is not active, " + "skip sync partition and data with base tables", materializedView.getName(), mvId); LOG.warn(errorMsg); throw new DmlException(errorMsg); } mvContext = new MvTaskRunContext(context); }
// Snapshots the base tables; for expression-range-partitioned MVs additionally aligns the
// MV's partitions with the base table's (add/drop as needed).
private void syncPartitions() { snapshotBaseTables = collectBaseTables(materializedView); PartitionInfo partitionInfo = materializedView.getPartitionInfo(); if (partitionInfo instanceof ExpressionRangePartitionInfo) { syncPartitionsForExpr(); } }
// Finds the base table (and the referenced column) that the MV partition expression's slot
// points at; returns Pair(null, null) when no base table matches the slot's table name.
private Pair<Table, Column> getPartitionTableAndColumn( Map<Long, Pair<BaseTableInfo, Table>> tables) { SlotRef slotRef = MaterializedView.getPartitionSlotRef(materializedView); for (Pair<BaseTableInfo, Table> tableInfo : tables.values()) { BaseTableInfo baseTableInfo = tableInfo.first; Table table = tableInfo.second; if (slotRef.getTblNameWithoutAnalyzed().getTbl().equals(baseTableInfo.getTableName())) { return Pair.create(table, table.getColumn(slotRef.getColumnName())); } } return Pair.create(null, null); }
// Diffs base-table vs MV range partitions — same-granularity for a plain SlotRef partition
// expression, rolled-up for a function-call expression (granularity read from its first
// string-literal argument) — then drops/adds MV partitions to match and caches the
// base<->MV partition-name cross references plus the base partition map in mvContext.
private void syncPartitionsForExpr() { Expr partitionExpr = MaterializedView.getPartitionExpr(materializedView); Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Table partitionBaseTable = partitionTableAndColumn.first; Preconditions.checkNotNull(partitionBaseTable); Column partitionColumn = partitionTableAndColumn.second; Preconditions.checkNotNull(partitionColumn); PartitionDiff partitionDiff = new PartitionDiff(); Map<String, Range<PartitionKey>> basePartitionMap; Map<String, Range<PartitionKey>> mvPartitionMap = materializedView.getRangePartitionMap(); database.readLock(); try { basePartitionMap = PartitionUtil.getPartitionRange(partitionBaseTable, partitionColumn); if (partitionExpr instanceof SlotRef) { partitionDiff = SyncPartitionUtils.calcSyncSamePartition(basePartitionMap, mvPartitionMap); } else if (partitionExpr instanceof FunctionCallExpr) { FunctionCallExpr
functionCallExpr = (FunctionCallExpr) partitionExpr; String granularity = ((StringLiteral) functionCallExpr.getChild(0)).getValue().toLowerCase(); partitionDiff = SyncPartitionUtils.calcSyncRollupPartition(basePartitionMap, mvPartitionMap, granularity, partitionColumn.getPrimitiveType()); } } catch (UserException e) { LOG.warn("Materialized view compute partition difference with base table failed.", e); return; } finally { database.readUnlock(); } Map<String, Range<PartitionKey>> deletes = partitionDiff.getDeletes(); for (Map.Entry<String, Range<PartitionKey>> deleteEntry : deletes.entrySet()) { String mvPartitionName = deleteEntry.getKey(); dropPartition(database, materializedView, mvPartitionName); } LOG.info("The process of synchronizing materialized view [{}] delete partitions range [{}]", materializedView.getName(), deletes); Map<String, String> partitionProperties = getPartitionProperties(materializedView); DistributionDesc distributionDesc = getDistributionDesc(materializedView); Map<String, Range<PartitionKey>> adds = partitionDiff.getAdds(); for (Map.Entry<String, Range<PartitionKey>> addEntry : adds.entrySet()) { String mvPartitionName = addEntry.getKey(); addPartition(database, materializedView, mvPartitionName, addEntry.getValue(), partitionProperties, distributionDesc); mvPartitionMap.put(mvPartitionName, addEntry.getValue()); } LOG.info("The process of synchronizing materialized view [{}] add partitions range [{}]", materializedView.getName(), adds); Map<String, Set<String>> baseToMvNameRef = SyncPartitionUtils .generatePartitionRefMap(basePartitionMap, mvPartitionMap); Map<String, Set<String>> mvToBaseNameRef = SyncPartitionUtils .generatePartitionRefMap(mvPartitionMap, basePartitionMap); mvContext.setBaseToMvNameRef(baseToMvNameRef); mvContext.setMvToBaseNameRef(mvToBaseNameRef); mvContext.setBasePartitionMap(basePartitionMap); }
// True when the base table has at least one partition updated since the MV's last refresh.
private boolean needToRefreshTable(Table table) { return
CollectionUtils.isNotEmpty(materializedView.getUpdatedPartitionNamesOfTable(table)); }
// True when any base table OTHER than the partitioning table (and refreshable by partition)
// has updated partitions — such changes can't be narrowed to specific MV partitions.
private boolean needToRefreshNonPartitionTable(Table partitionTable) { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table snapshotTable = tablePair.second; if (snapshotTable.getId() == partitionTable.getId()) { continue; } if (!supportRefreshByPartition(snapshotTable)) { continue; } if (needToRefreshTable(snapshotTable)) { return true; } } return false; }
// Full-refresh check for an unpartitioned MV: refresh if any base table either can't be
// tracked by partition or has updated partitions.
private boolean unPartitionedMVNeedToRefresh() { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table snapshotTable = tablePair.second; if (!supportRefreshByPartition(snapshotTable)) { return true; } if (needToRefreshTable(snapshotTable)) { return true; } } return false; }
// Public entry: reads PARTITION_START/PARTITION_END/FORCE from the task properties,
// delegates to the private overload, and records the inputs in the task-run extra message.
@VisibleForTesting public Set<String> getPartitionsToRefreshForMaterializedView(Map<String, String> properties) throws AnalysisException { String start = properties.get(TaskRun.PARTITION_START); String end = properties.get(TaskRun.PARTITION_END); boolean force = Boolean.parseBoolean(properties.get(TaskRun.FORCE)); PartitionInfo partitionInfo = materializedView.getPartitionInfo(); Set<String> needRefreshMvPartitionNames = getPartitionsToRefreshForMaterializedView(partitionInfo, start, end, force); if (this.getMVTaskRunExtraMessage() != null) { MVTaskRunExtraMessage extraMessage = this.getMVTaskRunExtraMessage(); extraMessage.setForceRefresh(force); extraMessage.setPartitionStart(start); extraMessage.setPartitionEnd(end); } return needRefreshMvPartitionNames; }
// Computes which MV partitions need refreshing. Unbounded force => everything. For a
// single-partition MV, everything when forced or any base table changed. For an
// expression-range-partitioned MV, narrows to the MV partitions in the [start, end) window
// whose base partitions changed; for rolled-up (function) partition expressions the set is
// widened via calcPotentialRefreshPartition using the base<->MV cross references.
private Set<String> getPartitionsToRefreshForMaterializedView(PartitionInfo partitionInfo, String start, String end, boolean force) throws AnalysisException { if (force && start == null && end == null) { return Sets.newHashSet(materializedView.getPartitionNames()); } Set<String> needRefreshMvPartitionNames = Sets.newHashSet(); if (partitionInfo instanceof SinglePartitionInfo) { if (force || unPartitionedMVNeedToRefresh()) { return
Sets.newHashSet(materializedView.getPartitionNames()); } } else if (partitionInfo instanceof ExpressionRangePartitionInfo) { Expr partitionExpr = MaterializedView.getPartitionExpr(materializedView); Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Table partitionTable = partitionTableAndColumn.first; Set<String> mvRangePartitionNames = SyncPartitionUtils.getPartitionNamesByRangeWithPartitionLimit( materializedView, start, end, mvContext.type); if (needToRefreshNonPartitionTable(partitionTable)) { if (start == null && end == null) { return Sets.newHashSet(materializedView.getPartitionNames()); } else { return getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(partitionTable, mvRangePartitionNames, true); } } if (partitionExpr instanceof SlotRef) { return getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(partitionTable, mvRangePartitionNames, force); } else if (partitionExpr instanceof FunctionCallExpr) { needRefreshMvPartitionNames = getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(partitionTable, mvRangePartitionNames, force); Set<String> baseChangedPartitionNames = getBasePartitionNamesByMVPartitionNames(needRefreshMvPartitionNames); LOG.debug("Start calcPotentialRefreshPartition, needRefreshMvPartitionNames: {}," + " baseChangedPartitionNames: {}", needRefreshMvPartitionNames, baseChangedPartitionNames); SyncPartitionUtils.calcPotentialRefreshPartition(needRefreshMvPartitionNames, baseChangedPartitionNames, mvContext.baseToMvNameRef, mvContext.mvToBaseNameRef); LOG.debug("Finish calcPotentialRefreshPartition, needRefreshMvPartitionNames: {}," + " baseChangedPartitionNames: {}", needRefreshMvPartitionNames, baseChangedPartitionNames); } } else { throw new DmlException("unsupported partition info type:" + partitionInfo.getClass().getName()); } return needRefreshMvPartitionNames; }
// Intersects the candidate MV partition names with those mapped from the base table's
// updated partitions; when forced (or the base table can't be tracked by partition) the
// whole candidate set is returned.
private Set<String> getMVPartitionNamesToRefreshByRangePartitionNamesAndForce(Table partitionTable, Set<String>
mvRangePartitionNames, boolean force) { if (force || !supportRefreshByPartition(partitionTable)) { return Sets.newHashSet(mvRangePartitionNames); } Set<String> updatePartitionNames = materializedView.getUpdatedPartitionNamesOfTable(partitionTable); if (updatePartitionNames == null) { return mvRangePartitionNames; } Set<String> result = getMVPartitionNamesByBasePartitionNames(updatePartitionNames); result.retainAll(mvRangePartitionNames); return result; }
// Maps base partition names to the MV partitions they feed (via baseToMvNameRef).
private Set<String> getMVPartitionNamesByBasePartitionNames(Set<String> basePartitionNames) { Set<String> result = Sets.newHashSet(); for (String basePartitionName : basePartitionNames) { result.addAll(mvContext.baseToMvNameRef.get(basePartitionName)); } return result; }
// Inverse of the above: maps MV partition names back to their base partitions.
private Set<String> getBasePartitionNamesByMVPartitionNames(Set<String> mvPartitionNames) { Set<String> result = Sets.newHashSet(); for (String mvPartitionName : mvPartitionNames) { result.addAll(mvContext.mvToBaseNameRef.get(mvPartitionName)); } return result; }
// For each base table, computes the base partitions the refresh insert must scan: the
// partitioning base table is narrowed via the MV->base name references; every other native
// table contributes all of its partitions.
@VisibleForTesting public Map<String, Set<String>> getSourceTablePartitions(Set<String> affectedMaterializedViewPartitions) { Table partitionTable = null; if (materializedView.getPartitionInfo() instanceof ExpressionRangePartitionInfo) { Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); partitionTable = partitionTableAndColumn.first; } Map<String, Set<String>> tableNamePartitionNames = Maps.newHashMap(); for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table table = tablePair.second; if (partitionTable != null && partitionTable == table) { Set<String> needRefreshTablePartitionNames = Sets.newHashSet(); Map<String, Set<String>> mvToBaseNameRef = mvContext.getMvToBaseNameRef(); for (String mvPartitionName : affectedMaterializedViewPartitions) { needRefreshTablePartitionNames.addAll(mvToBaseNameRef.get(mvPartitionName)); } tableNamePartitionNames.put(table.getName(), needRefreshTablePartitionNames); } else { if
(table.isNativeTableOrMaterializedView()) { tableNamePartitionNames.put(table.getName(), ((OlapTable) table).getPartitionNames()); } } } return tableNamePartitionNames; }
// Plans the refresh INSERT statement.
private ExecPlan generateRefreshPlan(ConnectContext ctx, InsertStmt insertStmt) throws AnalysisException { return StatementPlanner.plan(insertStmt, ctx); }
// Re-parses the MV's defining query as an INSERT, targets the MV partitions to rebuild,
// disables MV rewrite for the refresh query itself, and pins each source table relation to
// the partitions it must scan (null = all partitions).
@VisibleForTesting public InsertStmt generateInsertStmt(Set<String> materializedViewPartitions, Map<String, Set<String>> sourceTablePartitions) { ConnectContext ctx = mvContext.getCtx(); ctx.getAuditEventBuilder().reset(); ctx.getAuditEventBuilder() .setTimestamp(System.currentTimeMillis()) .setClientIp(mvContext.getRemoteIp()) .setUser(ctx.getQualifiedUser()) .setDb(ctx.getDatabase()); ctx.getPlannerProfile().reset(); ctx.setThreadLocalInfo(); ctx.getSessionVariable().setEnableMaterializedViewRewrite(false); String definition = mvContext.getDefinition(); InsertStmt insertStmt = (InsertStmt) SqlParser.parse(definition, ctx.getSessionVariable()).get(0); insertStmt.setTargetPartitionNames(new PartitionNames(false, new ArrayList<>(materializedViewPartitions))); insertStmt.setSystem(true); Analyzer.analyze(insertStmt, ctx); QueryStatement queryStatement = insertStmt.getQueryStatement(); Map<String, TableRelation> tableRelations = AnalyzerUtils.collectAllTableRelation(queryStatement); for (Map.Entry<String, TableRelation> nameTableRelationEntry : tableRelations.entrySet()) { Set<String> tablePartitionNames = sourceTablePartitions.get(nameTableRelationEntry.getKey()); TableRelation tableRelation = nameTableRelationEntry.getValue(); tableRelation.setPartitionNames( new PartitionNames(false, tablePartitionNames == null ?
null : new ArrayList<>(tablePartitionNames))); Table table = tableRelation.getTable(); if (tablePartitionNames != null && !table.isNativeTableOrMaterializedView()) { generatePartitionPredicate(tablePartitionNames, queryStatement, tableRelation); } } return insertStmt; }
// Restricts a non-native base-table scan to the given base partitions by attaching a range
// predicate on the MV partition column to the table relation (method continues past the end
// of this chunk).
private void generatePartitionPredicate(Set<String> tablePartitionNames, QueryStatement queryStatement, TableRelation tableRelation) { List<Range<PartitionKey>> sourceTablePartitionRange = Lists.newArrayList(); for (String partitionName : tablePartitionNames) { sourceTablePartitionRange.add(mvContext.getBasePartitionMap().get(partitionName)); } sourceTablePartitionRange = MvUtils.mergeRanges(sourceTablePartitionRange); SlotRef partitionSlot = MaterializedView.getPartitionSlotRef(materializedView); List<String> columnOutputNames = queryStatement.getQueryRelation().getColumnOutputNames(); List<Expr> outputExpressions = queryStatement.getQueryRelation().getOutputExpression(); Expr outputPartitionSlot = null; for (int i = 0; i < outputExpressions.size(); ++i) { if (columnOutputNames.get(i).equalsIgnoreCase(partitionSlot.getColumnName())) { outputPartitionSlot = outputExpressions.get(i); break; } } if (outputPartitionSlot != null) { List<Expr> partitionPredicates = MvUtils.convertRange(outputPartitionSlot, sourceTablePartitionRange); Optional<Range<PartitionKey>> nullRange = sourceTablePartitionRange.stream().
filter(range -> range.lowerEndpoint().isMinValue()).findAny(); if (nullRange.isPresent()) { Expr isNullPredicate = new IsNullPredicate(outputPartitionSlot, false); partitionPredicates.add(isNullPredicate); } tableRelation.setPartitionPredicate(Expr.compoundOr(partitionPredicates)); } } private boolean checkBaseTablePartitionChange() { for (Pair<BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { BaseTableInfo baseTableInfo = tablePair.first; Table snapshotTable = tablePair.second; Database db = baseTableInfo.getDb(); if (db == null) { return true; } db.readLock(); try { Table table = baseTableInfo.getTable(); if (table == null) { return true; } if (snapshotTable.isOlapOrCloudNativeTable()) { OlapTable snapShotOlapTable = (OlapTable) snapshotTable; if (snapShotOlapTable.getPartitionInfo() instanceof SinglePartitionInfo) { Set<String> partitionNames = ((OlapTable) table).getPartitionNames(); if (!snapShotOlapTable.getPartitionNames().equals(partitionNames)) { return true; } } else { Map<String, Range<PartitionKey>> snapshotPartitionMap = snapShotOlapTable.getRangePartitionMap(); Map<String, Range<PartitionKey>> currentPartitionMap = ((OlapTable) table).getRangePartitionMap(); boolean changed = SyncPartitionUtils.hasPartitionChange(snapshotPartitionMap, currentPartitionMap); if (changed) { return true; } } } else if (snapshotTable.isHiveTable() || snapshotTable.isHudiTable()) { HiveMetaStoreTable snapShotHMSTable = (HiveMetaStoreTable) snapshotTable; if (snapShotHMSTable.isUnPartitioned()) { if (!((HiveMetaStoreTable) table).isUnPartitioned()) { return true; } } else { PartitionInfo mvPartitionInfo = materializedView.getPartitionInfo(); if (!(mvPartitionInfo instanceof ExpressionRangePartitionInfo)) { return false; } Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Column partitionColumn = partitionTableAndColumn.second; if (!snapshotTable.containColumn(partitionColumn.getName())) { continue; } Map<String, 
Range<PartitionKey>> snapshotPartitionMap = PartitionUtil. getPartitionRange(snapshotTable, partitionColumn); Map<String, Range<PartitionKey>> currentPartitionMap = PartitionUtil. getPartitionRange(table, partitionColumn); boolean changed = SyncPartitionUtils.hasPartitionChange(snapshotPartitionMap, currentPartitionMap); if (changed) { return true; } } } else if (snapshotTable.isIcebergTable()) { IcebergTable snapShotIcebergTable = (IcebergTable) snapshotTable; if (snapShotIcebergTable.isUnPartitioned()) { if (!table.isUnPartitioned()) { return true; } } else { PartitionInfo mvPartitionInfo = materializedView.getPartitionInfo(); if (!(mvPartitionInfo instanceof ExpressionRangePartitionInfo)) { return false; } Pair<Table, Column> partitionTableAndColumn = getPartitionTableAndColumn(snapshotBaseTables); Column partitionColumn = partitionTableAndColumn.second; if (!snapShotIcebergTable.containColumn(partitionColumn.getName())) { continue; } Map<String, Range<PartitionKey>> snapshotPartitionMap = PartitionUtil. getPartitionRange(snapshotTable, partitionColumn); Map<String, Range<PartitionKey>> currentPartitionMap = PartitionUtil. 
getPartitionRange(table, partitionColumn); boolean changed = SyncPartitionUtils.hasPartitionChange(snapshotPartitionMap, currentPartitionMap); if (changed) { return true; } } } } catch (UserException e) { LOG.warn("Materialized view compute partition change failed", e); return true; } finally { db.readUnlock(); } } return false; } private Map<Long, Map<String, MaterializedView.BasePartitionInfo>> getSourceTablePartitionInfos(ExecPlan execPlan) { Map<Long, Map<String, MaterializedView.BasePartitionInfo>> selectedBasePartitionInfos = Maps.newHashMap(); List<ScanNode> scanNodes = execPlan.getScanNodes(); for (ScanNode scanNode : scanNodes) { if (scanNode instanceof OlapScanNode) { OlapScanNode olapScanNode = (OlapScanNode) scanNode; Map<String, MaterializedView.BasePartitionInfo> selectedPartitionIdVersions = getSelectedPartitionInfos(olapScanNode); OlapTable olapTable = olapScanNode.getOlapTable(); selectedBasePartitionInfos.put(olapTable.getId(), selectedPartitionIdVersions); } } return selectedBasePartitionInfos; } private Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> getSourceTableInfoPartitionInfos( ExecPlan execPlan) { Map<BaseTableInfo, Map<String, MaterializedView.BasePartitionInfo>> selectedBasePartitionInfos = Maps.newHashMap(); List<ScanNode> scanNodes = execPlan.getScanNodes(); for (ScanNode scanNode : scanNodes) { if (scanNode instanceof HdfsScanNode) { HdfsScanNode hdfsScanNode = (HdfsScanNode) scanNode; Table hiveTable = hdfsScanNode.getHiveTable(); Optional<BaseTableInfo> baseTableInfoOptional = materializedView.getBaseTableInfos().stream().filter( baseTableInfo -> baseTableInfo.getTableIdentifier().equals(hiveTable.getTableIdentifier())). 
findAny(); if (!baseTableInfoOptional.isPresent()) { continue; } Map<String, MaterializedView.BasePartitionInfo> selectedPartitionIdVersions = getSelectedPartitionInfos(hdfsScanNode, baseTableInfoOptional.get()); selectedBasePartitionInfos.put(baseTableInfoOptional.get(), selectedPartitionIdVersions); } } return selectedBasePartitionInfos; } @VisibleForTesting public void refreshMaterializedView(MvTaskRunContext mvContext, ExecPlan execPlan, InsertStmt insertStmt) throws Exception { Preconditions.checkNotNull(execPlan); Preconditions.checkNotNull(insertStmt); ConnectContext ctx = mvContext.getCtx(); StmtExecutor executor = new StmtExecutor(ctx, insertStmt); ctx.setExecutor(executor); ctx.setStmtId(new AtomicInteger().incrementAndGet()); ctx.setExecutionId(UUIDUtil.toTUniqueId(ctx.getQueryId())); try { executor.handleDMLStmt(execPlan, insertStmt); } finally { QeProcessorImpl.INSTANCE.unregisterQuery(ctx.getExecutionId()); auditAfterExec(mvContext, executor.getParsedStmt(), executor.getQueryStatisticsForAuditLog()); } } @VisibleForTesting public Map<Long, Pair<BaseTableInfo, Table>> collectBaseTables(MaterializedView materializedView) { Map<Long, Pair<BaseTableInfo, Table>> tables = Maps.newHashMap(); List<BaseTableInfo> baseTableInfos = materializedView.getBaseTableInfos(); for (BaseTableInfo baseTableInfo : baseTableInfos) { Database db = baseTableInfo.getDb(); if (db == null) { LOG.warn("database {} do not exist when refreshing materialized view:{}", baseTableInfo.getDbInfoStr(), materializedView.getName()); throw new DmlException("database " + baseTableInfo.getDbInfoStr() + " do not exist."); } Table table = baseTableInfo.getTable(); if (table == null) { LOG.warn("table {} do not exist when refreshing materialized view:{}", baseTableInfo.getTableInfoStr(), materializedView.getName()); throw new DmlException("Materialized view base table: %s not exist.", baseTableInfo.getTableInfoStr()); } db.readLock(); try { if (table.isOlapTable()) { Table copied = new 
OlapTable(); if (!DeepCopy.copy(table, copied, OlapTable.class)) { throw new DmlException("Failed to copy olap table: %s", table.getName()); } tables.put(table.getId(), Pair.create(baseTableInfo, copied)); } else if (table.isCloudNativeTable()) { LakeTable copied = DeepCopy.copyWithGson(table, LakeTable.class); if (copied == null) { throw new DmlException("Failed to copy lake table: %s", table.getName()); } tables.put(table.getId(), Pair.create(baseTableInfo, copied)); } else { tables.put(table.getId(), Pair.create(baseTableInfo, table)); } } finally { db.readUnlock(); } } return tables; } private Map<String, String> getPartitionProperties(MaterializedView materializedView) { Map<String, String> partitionProperties = new HashMap<>(4); partitionProperties.put("replication_num", String.valueOf(materializedView.getDefaultReplicationNum())); partitionProperties.put("storage_medium", materializedView.getStorageMedium()); String storageCooldownTime = materializedView.getTableProperty().getProperties().get("storage_cooldown_time"); if (storageCooldownTime != null && !storageCooldownTime.equals(String.valueOf(DataProperty.MAX_COOLDOWN_TIME_MS))) { String storageCooldownTimeStr = TimeUtils.longToTimeString(Long.parseLong(storageCooldownTime)); partitionProperties.put("storage_cooldown_time", storageCooldownTimeStr); } return partitionProperties; } private DistributionDesc getDistributionDesc(MaterializedView materializedView) { HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) materializedView.getDefaultDistributionInfo(); List<String> distColumnNames = new ArrayList<>(); for (Column distributionColumn : hashDistributionInfo.getDistributionColumns()) { distColumnNames.add(distributionColumn.getName()); } return new HashDistributionDesc(hashDistributionInfo.getBucketNum(), distColumnNames); } private void addPartition(Database database, MaterializedView materializedView, String partitionName, Range<PartitionKey> partitionKeyRange, Map<String, String> 
partitionProperties, DistributionDesc distributionDesc) { String lowerBound = partitionKeyRange.lowerEndpoint().getKeys().get(0).getStringValue(); String upperBound = partitionKeyRange.upperEndpoint().getKeys().get(0).getStringValue(); boolean isMaxValue = partitionKeyRange.upperEndpoint().isMaxValue(); PartitionValue upperPartitionValue; if (isMaxValue) { upperPartitionValue = PartitionValue.MAX_VALUE; } else { upperPartitionValue = new PartitionValue(upperBound); } PartitionKeyDesc partitionKeyDesc = new PartitionKeyDesc( Collections.singletonList(new PartitionValue(lowerBound)), Collections.singletonList(upperPartitionValue)); SingleRangePartitionDesc singleRangePartitionDesc = new SingleRangePartitionDesc(false, partitionName, partitionKeyDesc, partitionProperties); try { GlobalStateMgr.getCurrentState().addPartitions( database, materializedView.getName(), new AddPartitionClause(singleRangePartitionDesc, distributionDesc, partitionProperties, false)); } catch (Exception e) { throw new DmlException("Expression add partition failed: %s, db: %s, table: %s", e, e.getMessage(), database.getFullName(), materializedView.getName()); } } private void dropPartition(Database database, MaterializedView materializedView, String mvPartitionName) { String dropPartitionName = materializedView.getPartition(mvPartitionName).getName(); if (!database.writeLockAndCheckExist()) { throw new DmlException("drop partition failed. database:" + database.getFullName() + " not exist"); } try { Table mv = database.getTable(materializedView.getId()); if (mv == null) { throw new DmlException("drop partition failed. mv:" + materializedView.getName() + " not exist"); } Partition mvPartition = mv.getPartition(dropPartitionName); if (mvPartition == null) { throw new DmlException("drop partition failed. 
partition:" + dropPartitionName + " not exist"); } GlobalStateMgr.getCurrentState().dropPartition( database, materializedView, new DropPartitionClause(false, dropPartitionName, false, true)); } catch (Exception e) { throw new DmlException("Expression add partition failed: %s, db: %s, table: %s", e, e.getMessage(), database.getFullName(), materializedView.getName()); } finally { database.writeUnlock(); } } private Map<String, MaterializedView.BasePartitionInfo> getSelectedPartitionInfos(OlapScanNode olapScanNode) { Map<String, MaterializedView.BasePartitionInfo> partitionInfos = Maps.newHashMap(); Collection<Long> selectedPartitionIds = olapScanNode.getSelectedPartitionIds(); Collection<String> selectedPartitionNames = olapScanNode.getSelectedPartitionNames(); Collection<Long> selectedPartitionVersions = olapScanNode.getSelectedPartitionVersions(); Iterator<Long> selectPartitionIdIterator = selectedPartitionIds.iterator(); Iterator<String> selectPartitionNameIterator = selectedPartitionNames.iterator(); Iterator<Long> selectPartitionVersionIterator = selectedPartitionVersions.iterator(); while (selectPartitionIdIterator.hasNext()) { long partitionId = selectPartitionIdIterator.next(); String partitionName = selectPartitionNameIterator.next(); long partitionVersion = selectPartitionVersionIterator.next(); partitionInfos.put(partitionName, new MaterializedView.BasePartitionInfo(partitionId, partitionVersion)); } return partitionInfos; } private Map<String, MaterializedView.BasePartitionInfo> getSelectedPartitionInfos(HdfsScanNode hdfsScanNode, BaseTableInfo baseTableInfo) { Map<String, MaterializedView.BasePartitionInfo> partitionInfos = Maps.newHashMap(); HiveTable hiveTable = hdfsScanNode.getHiveTable(); List<String> partitionColumnNames = hiveTable.getPartitionColumnNames(); List<String> selectedPartitionNames; if (hiveTable.isUnPartitioned()) { selectedPartitionNames = Lists.newArrayList(hiveTable.getTableName()); } else { Collection<Long> selectedPartitionIds = 
hdfsScanNode.getScanNodePredicates().getSelectedPartitionIds(); List<PartitionKey> selectedPartitionKey = Lists.newArrayList(); for (Long selectedPartitionId : selectedPartitionIds) { selectedPartitionKey .add(hdfsScanNode.getScanNodePredicates().getIdToPartitionKey().get(selectedPartitionId)); } selectedPartitionNames = selectedPartitionKey.stream().map(partitionKey -> PartitionUtil.toHivePartitionName(partitionColumnNames, partitionKey)).collect(Collectors.toList()); } List<com.starrocks.connector.PartitionInfo> hivePartitions = GlobalStateMgr. getCurrentState().getMetadataMgr().getPartitions(baseTableInfo.getCatalogName(), hiveTable, selectedPartitionNames); for (int index = 0; index < selectedPartitionNames.size(); ++index) { partitionInfos.put(selectedPartitionNames.get(index), new MaterializedView.BasePartitionInfo(-1, hivePartitions.get(index).getModifiedTime())); } return partitionInfos; } }
Put this just before the try-block, and remove the test for classLock==null in catch-block.
public NodeAdminProvider(Docker docker, MetricReceiverWrapper metricReceiver, Locking locking) { log.log(LogLevel.INFO, objectToString() + ": Creating object, acquiring lock..."); try { classLock = locking.lock(this.getClass()); log.log(LogLevel.INFO, objectToString() + ": Lock acquired"); Clock clock = Clock.systemUTC(); String dockerHostHostName = HostName.getLocalhost(); ProcessExecuter processExecuter = new ProcessExecuter(); Environment environment = new Environment(); ConfigServerHttpRequestExecutor requestExecutor = ConfigServerHttpRequestExecutor.create(environment.getConfigServerHosts()); NodeRepository nodeRepository = new NodeRepositoryImpl(requestExecutor, WEB_SERVICE_PORT); Orchestrator orchestrator = new OrchestratorImpl(requestExecutor, WEB_SERVICE_PORT); DockerOperations dockerOperations = new DockerOperationsImpl(docker, environment, processExecuter); StorageMaintainer storageMaintainer = new StorageMaintainer(docker, processExecuter, metricReceiver, environment, clock); AclMaintainer aclMaintainer = new AclMaintainer(dockerOperations, nodeRepository, dockerHostHostName); Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, storageMaintainer, aclMaintainer, environment, clock, NODE_AGENT_SCAN_INTERVAL); NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, storageMaintainer, aclMaintainer, metricReceiver, clock); nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepository, orchestrator, storageMaintainer, nodeAdmin, dockerHostHostName, clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL); nodeAdminStateUpdater.start(); } catch (Exception e) { if (classLock != null) classLock.close(); throw e; } }
classLock = locking.lock(this.getClass());
public NodeAdminProvider(Docker docker, MetricReceiverWrapper metricReceiver, Locking locking) { log.log(LogLevel.INFO, objectToString() + ": Creating object, acquiring lock..."); classLock = locking.lock(this.getClass()); try { log.log(LogLevel.INFO, objectToString() + ": Lock acquired"); Clock clock = Clock.systemUTC(); String dockerHostHostName = HostName.getLocalhost(); ProcessExecuter processExecuter = new ProcessExecuter(); Environment environment = new Environment(); ConfigServerHttpRequestExecutor requestExecutor = ConfigServerHttpRequestExecutor.create(environment.getConfigServerHosts()); NodeRepository nodeRepository = new NodeRepositoryImpl(requestExecutor, WEB_SERVICE_PORT); Orchestrator orchestrator = new OrchestratorImpl(requestExecutor, WEB_SERVICE_PORT); DockerOperations dockerOperations = new DockerOperationsImpl(docker, environment, processExecuter); StorageMaintainer storageMaintainer = new StorageMaintainer(docker, processExecuter, metricReceiver, environment, clock); AclMaintainer aclMaintainer = new AclMaintainer(dockerOperations, nodeRepository, dockerHostHostName); Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, storageMaintainer, aclMaintainer, environment, clock, NODE_AGENT_SCAN_INTERVAL); NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, storageMaintainer, aclMaintainer, metricReceiver, clock); nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepository, orchestrator, storageMaintainer, nodeAdmin, dockerHostHostName, clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL); nodeAdminStateUpdater.start(); } catch (Exception e) { classLock.close(); throw e; } }
class NodeAdminProvider implements Provider<NodeAdminStateUpdater> { private static final int WEB_SERVICE_PORT = getDefaults().vespaWebServicePort(); private static final Duration NODE_AGENT_SCAN_INTERVAL = Duration.ofSeconds(30); private static final Duration NODE_ADMIN_CONVERGE_STATE_INTERVAL = Duration.ofSeconds(30); private final Logger log = Logger.getLogger(NodeAdminProvider.class.getName()); private final NodeAdminStateUpdater nodeAdminStateUpdater; private Lock classLock = null; @Inject @Override public NodeAdminStateUpdater get() { return nodeAdminStateUpdater; } @Override public void deconstruct() { log.log(LogLevel.INFO, objectToString() + ": Stop called"); nodeAdminStateUpdater.stop(); log.log(LogLevel.INFO, objectToString() + ": Stop complete"); classLock.close(); log.log(LogLevel.INFO, objectToString() + ": Lock released"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } }
class NodeAdminProvider implements Provider<NodeAdminStateUpdater> { private static final int WEB_SERVICE_PORT = getDefaults().vespaWebServicePort(); private static final Duration NODE_AGENT_SCAN_INTERVAL = Duration.ofSeconds(30); private static final Duration NODE_ADMIN_CONVERGE_STATE_INTERVAL = Duration.ofSeconds(30); private final Logger log = Logger.getLogger(NodeAdminProvider.class.getName()); private final NodeAdminStateUpdater nodeAdminStateUpdater; private final Lock classLock; @Inject @Override public NodeAdminStateUpdater get() { return nodeAdminStateUpdater; } @Override public void deconstruct() { log.log(LogLevel.INFO, objectToString() + ": Stop called"); nodeAdminStateUpdater.stop(); log.log(LogLevel.INFO, objectToString() + ": Stop complete"); classLock.close(); log.log(LogLevel.INFO, objectToString() + ": Lock released"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } }
Fixed.
public NodeAdminProvider(Docker docker, MetricReceiverWrapper metricReceiver, Locking locking) { log.log(LogLevel.INFO, objectToString() + ": Creating object, acquiring lock..."); try { classLock = locking.lock(this.getClass()); log.log(LogLevel.INFO, objectToString() + ": Lock acquired"); Clock clock = Clock.systemUTC(); String dockerHostHostName = HostName.getLocalhost(); ProcessExecuter processExecuter = new ProcessExecuter(); Environment environment = new Environment(); ConfigServerHttpRequestExecutor requestExecutor = ConfigServerHttpRequestExecutor.create(environment.getConfigServerHosts()); NodeRepository nodeRepository = new NodeRepositoryImpl(requestExecutor, WEB_SERVICE_PORT); Orchestrator orchestrator = new OrchestratorImpl(requestExecutor, WEB_SERVICE_PORT); DockerOperations dockerOperations = new DockerOperationsImpl(docker, environment, processExecuter); StorageMaintainer storageMaintainer = new StorageMaintainer(docker, processExecuter, metricReceiver, environment, clock); AclMaintainer aclMaintainer = new AclMaintainer(dockerOperations, nodeRepository, dockerHostHostName); Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, storageMaintainer, aclMaintainer, environment, clock, NODE_AGENT_SCAN_INTERVAL); NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, storageMaintainer, aclMaintainer, metricReceiver, clock); nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepository, orchestrator, storageMaintainer, nodeAdmin, dockerHostHostName, clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL); nodeAdminStateUpdater.start(); } catch (Exception e) { if (classLock != null) classLock.close(); throw e; } }
classLock = locking.lock(this.getClass());
public NodeAdminProvider(Docker docker, MetricReceiverWrapper metricReceiver, Locking locking) { log.log(LogLevel.INFO, objectToString() + ": Creating object, acquiring lock..."); classLock = locking.lock(this.getClass()); try { log.log(LogLevel.INFO, objectToString() + ": Lock acquired"); Clock clock = Clock.systemUTC(); String dockerHostHostName = HostName.getLocalhost(); ProcessExecuter processExecuter = new ProcessExecuter(); Environment environment = new Environment(); ConfigServerHttpRequestExecutor requestExecutor = ConfigServerHttpRequestExecutor.create(environment.getConfigServerHosts()); NodeRepository nodeRepository = new NodeRepositoryImpl(requestExecutor, WEB_SERVICE_PORT); Orchestrator orchestrator = new OrchestratorImpl(requestExecutor, WEB_SERVICE_PORT); DockerOperations dockerOperations = new DockerOperationsImpl(docker, environment, processExecuter); StorageMaintainer storageMaintainer = new StorageMaintainer(docker, processExecuter, metricReceiver, environment, clock); AclMaintainer aclMaintainer = new AclMaintainer(dockerOperations, nodeRepository, dockerHostHostName); Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, storageMaintainer, aclMaintainer, environment, clock, NODE_AGENT_SCAN_INTERVAL); NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, storageMaintainer, aclMaintainer, metricReceiver, clock); nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepository, orchestrator, storageMaintainer, nodeAdmin, dockerHostHostName, clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL); nodeAdminStateUpdater.start(); } catch (Exception e) { classLock.close(); throw e; } }
class NodeAdminProvider implements Provider<NodeAdminStateUpdater> { private static final int WEB_SERVICE_PORT = getDefaults().vespaWebServicePort(); private static final Duration NODE_AGENT_SCAN_INTERVAL = Duration.ofSeconds(30); private static final Duration NODE_ADMIN_CONVERGE_STATE_INTERVAL = Duration.ofSeconds(30); private final Logger log = Logger.getLogger(NodeAdminProvider.class.getName()); private final NodeAdminStateUpdater nodeAdminStateUpdater; private Lock classLock = null; @Inject @Override public NodeAdminStateUpdater get() { return nodeAdminStateUpdater; } @Override public void deconstruct() { log.log(LogLevel.INFO, objectToString() + ": Stop called"); nodeAdminStateUpdater.stop(); log.log(LogLevel.INFO, objectToString() + ": Stop complete"); classLock.close(); log.log(LogLevel.INFO, objectToString() + ": Lock released"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } }
class NodeAdminProvider implements Provider<NodeAdminStateUpdater> { private static final int WEB_SERVICE_PORT = getDefaults().vespaWebServicePort(); private static final Duration NODE_AGENT_SCAN_INTERVAL = Duration.ofSeconds(30); private static final Duration NODE_ADMIN_CONVERGE_STATE_INTERVAL = Duration.ofSeconds(30); private final Logger log = Logger.getLogger(NodeAdminProvider.class.getName()); private final NodeAdminStateUpdater nodeAdminStateUpdater; private final Lock classLock; @Inject @Override public NodeAdminStateUpdater get() { return nodeAdminStateUpdater; } @Override public void deconstruct() { log.log(LogLevel.INFO, objectToString() + ": Stop called"); nodeAdminStateUpdater.stop(); log.log(LogLevel.INFO, objectToString() + ": Stop complete"); classLock.close(); log.log(LogLevel.INFO, objectToString() + ": Lock released"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } }
s/body_decoded_ize/body_decoded_size/
protected Method buildMethod() { Method method = new Method(METHOD_NAME, METHOD_PARAMS, METHOD_RETURN, this); method.methodDesc("Send a message bus request and get a reply back."); method.paramDesc(0, "header_encoding", "Encoding type of header.") .paramDesc(1, "header_decodedSize", "Number of bytes after header decoding.") .paramDesc(2, "header_payload", "Slime encoded header payload.") .paramDesc(3, "body_encoding", "Encoding type of body.") .paramDesc(4, "body_decoded_ize", "Number of bytes after body decoding.") .paramDesc(5, "body_payload", "Slime encoded body payload."); method.returnDesc(0, "header_encoding", "Encoding type of header.") .returnDesc(1, "header_decoded_size", "Number of bytes after header decoding.") .returnDesc(2, "header_payload", "Slime encoded header payload.") .returnDesc(3, "body_encoding", "Encoding type of body.") .returnDesc(4, "body_encoded_size", "Number of bytes after body decoding.") .returnDesc(5, "body_payload", "Slime encoded body payload."); return method; }
.paramDesc(4, "body_decoded_ize", "Number of bytes after body decoding.")
protected Method buildMethod() { Method method = new Method(METHOD_NAME, METHOD_PARAMS, METHOD_RETURN, this); method.methodDesc("Send a message bus request and get a reply back."); method.paramDesc(0, "header_encoding", "Encoding type of header.") .paramDesc(1, "header_decodedSize", "Number of bytes after header decoding.") .paramDesc(2, "header_payload", "Slime encoded header payload.") .paramDesc(3, "body_encoding", "Encoding type of body.") .paramDesc(4, "body_decoded_ize", "Number of bytes after body decoding.") .paramDesc(5, "body_payload", "Slime encoded body payload."); method.returnDesc(0, "header_encoding", "Encoding type of header.") .returnDesc(1, "header_decoded_size", "Number of bytes after header decoding.") .returnDesc(2, "header_payload", "Slime encoded header payload.") .returnDesc(3, "body_encoding", "Encoding type of body.") .returnDesc(4, "body_encoded_size", "Number of bytes after body decoding.") .returnDesc(5, "body_payload", "Slime encoded body payload."); return method; }
class RPCSendV2 extends RPCSend { private final static String METHOD_NAME = "mbus.slime"; private final static String METHOD_PARAMS = "bixbix"; private final static String METHOD_RETURN = "bixbix"; private final Compressor compressor = new Compressor(CompressionType.LZ4, 3, 90, 1024); @Override protected String getReturnSpec() { return METHOD_RETURN; } @Override private static final String VERSION_F = new String("version"); private static final String ROUTE_F = new String("route"); private static final String SESSION_F = new String("session"); private static final String PROTOCOL_F = new String("prot"); private static final String TRACELEVEL_F = new String("tracelevel"); private static final String TRACE_F = new String("trace"); private static final String USERETRY_F = new String("useretry"); private static final String RETRY_F = new String("retry"); private static final String RETRYDELAY_F = new String("retrydelay"); private static final String TIMEREMAINING_F = new String("timeleft"); private static final String ERRORS_F = new String("errors"); private static final String SERVICE_F = new String("service"); private static final String CODE_F = new String("code"); private static final String BLOB_F = new String("msg"); private static final String MSG_F = new String("msg"); @Override protected Request encodeRequest(Version version, Route route, RPCServiceAddress address, Message msg, long timeRemaining, byte[] payload, int traceLevel) { Request req = new Request(METHOD_NAME); Values v = req.parameters(); v.add(new Int8Value(CompressionType.NONE.getCode())); v.add(new Int32Value(0)); v.add(new DataValue(new byte[0])); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(VERSION_F, version.toString()); root.setString(ROUTE_F, route.toString()); root.setString(SESSION_F, address.getSessionName()); root.setString(PROTOCOL_F, msg.getProtocol().toString()); root.setBool(USERETRY_F, msg.getRetryEnabled()); root.setLong(RETRY_F, msg.getRetry()); 
root.setLong(TIMEREMAINING_F, msg.getTimeRemaining()); root.setLong(TRACELEVEL_F, traceLevel); root.setData(BLOB_F, payload); byte[] serializedSlime = BinaryFormat.encode(slime); Compressor.Compression compressionResult = compressor.compress(serializedSlime); v.add(new Int8Value(compressionResult.type().getCode())); v.add(new Int32Value(compressionResult.uncompressedSize())); v.add(new DataValue(compressionResult.data())); return req; } @Override protected Reply createReply(Values ret, String serviceName, Trace trace) { CompressionType compression = CompressionType.valueOf(ret.get(3).asInt8()); byte[] slimeBytes = compressor.decompress(ret.get(5).asData(), compression, ret.get(4).asInt32()); Slime slime = BinaryFormat.decode(slimeBytes); Inspector root = slime.get(); Version version = new Version(root.field(VERSION_F).asString()); byte[] payload = root.field(BLOB_F).asData(); Reply reply = null; Error error = null; if (payload.length > 0) { Object retval = decode(new Utf8Array(root.field(PROTOCOL_F).asUtf8()), version, payload); if (retval instanceof Reply) { reply = (Reply) retval; } else { error = (Error) retval; } } if (reply == null) { reply = new EmptyReply(); } if (error != null) { reply.addError(error); } reply.setRetryDelay(root.field(RETRYDELAY_F).asDouble()); Inspector errors = root.field(ERRORS_F); for (int i = 0; i < errors.entries(); i++) { Inspector e = errors.entry(i); String service = e.field(SERVICE_F).asString(); reply.addError(new Error((int)e.field(CODE_F).asLong(), e.field(MSG_F).asString(), (service != null && service.length() > 0) ? 
service : serviceName)); } if (trace.getLevel() > 0) { trace.getRoot().addChild(TraceNode.decode(root.field(TRACE_F).asString())); } return reply; } protected Params toParams(Values args) { CompressionType compression = CompressionType.valueOf(args.get(3).asInt8()); byte[] slimeBytes = compressor.decompress(args.get(5).asData(), compression, args.get(4).asInt32()); Slime slime = BinaryFormat.decode(slimeBytes); Inspector root = slime.get(); Params p = new Params(); p.version = new Version(root.field(VERSION_F).asString()); p.route = root.field(ROUTE_F).asString(); p.session = root.field(SESSION_F).asString(); p.retryEnabled = root.field(USERETRY_F).asBool(); p.retry = (int)root.field(RETRY_F).asLong(); p.timeRemaining = root.field(TIMEREMAINING_F).asLong(); p.protocolName = new Utf8Array(Utf8.toBytes(root.field(PROTOCOL_F).asString())); p.payload = root.field(BLOB_F).asData(); p.traceLevel = (int)root.field(TRACELEVEL_F).asLong(); return p; } @Override protected void createResponse(Values ret, Reply reply, Version version, byte [] payload) { ret.add(new Int8Value(CompressionType.NONE.getCode())); ret.add(new Int32Value(0)); ret.add(new DataValue(new byte[0])); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(VERSION_F, version.toString()); root.setDouble(RETRYDELAY_F, reply.getRetryDelay()); root.setString(PROTOCOL_F, reply.getProtocol().toString()); root.setData(BLOB_F, payload); if (reply.getTrace().getLevel() > 0) { root.setString(TRACE_F, reply.getTrace().getRoot().encode()); } if (reply.getNumErrors() > 0) { Cursor array = root.setArray(ERRORS_F); for (int i = 0; i < reply.getNumErrors(); i++) { Cursor e = array.addObject(); Error mbusE = reply.getError(i); e.setLong(CODE_F, mbusE.getCode()); e.setString(MSG_F, mbusE.getMessage()); if (mbusE.getService() != null) { e.setString(SERVICE_F, mbusE.getService()); } } } byte[] serializedSlime = BinaryFormat.encode(slime); Compressor.Compression compressionResult = 
compressor.compress(serializedSlime); ret.add(new Int8Value(compressionResult.type().getCode())); ret.add(new Int32Value(compressionResult.uncompressedSize())); ret.add(new DataValue(compressionResult.data())); } }
class RPCSendV2 extends RPCSend { private final static String METHOD_NAME = "mbus.slime"; private final static String METHOD_PARAMS = "bixbix"; private final static String METHOD_RETURN = "bixbix"; private final Compressor compressor = new Compressor(CompressionType.LZ4, 3, 90, 1024); @Override protected String getReturnSpec() { return METHOD_RETURN; } @Override private static final String VERSION_F = new String("version"); private static final String ROUTE_F = new String("route"); private static final String SESSION_F = new String("session"); private static final String PROTOCOL_F = new String("prot"); private static final String TRACELEVEL_F = new String("tracelevel"); private static final String TRACE_F = new String("trace"); private static final String USERETRY_F = new String("useretry"); private static final String RETRY_F = new String("retry"); private static final String RETRYDELAY_F = new String("retrydelay"); private static final String TIMEREMAINING_F = new String("timeleft"); private static final String ERRORS_F = new String("errors"); private static final String SERVICE_F = new String("service"); private static final String CODE_F = new String("code"); private static final String BLOB_F = new String("msg"); private static final String MSG_F = new String("msg"); @Override protected Request encodeRequest(Version version, Route route, RPCServiceAddress address, Message msg, long timeRemaining, byte[] payload, int traceLevel) { Request req = new Request(METHOD_NAME); Values v = req.parameters(); v.add(new Int8Value(CompressionType.NONE.getCode())); v.add(new Int32Value(0)); v.add(new DataValue(new byte[0])); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(VERSION_F, version.toString()); root.setString(ROUTE_F, route.toString()); root.setString(SESSION_F, address.getSessionName()); root.setString(PROTOCOL_F, msg.getProtocol().toString()); root.setBool(USERETRY_F, msg.getRetryEnabled()); root.setLong(RETRY_F, msg.getRetry()); 
root.setLong(TIMEREMAINING_F, msg.getTimeRemaining()); root.setLong(TRACELEVEL_F, traceLevel); root.setData(BLOB_F, payload); byte[] serializedSlime = BinaryFormat.encode(slime); Compressor.Compression compressionResult = compressor.compress(serializedSlime); v.add(new Int8Value(compressionResult.type().getCode())); v.add(new Int32Value(compressionResult.uncompressedSize())); v.add(new DataValue(compressionResult.data())); return req; } @Override protected Reply createReply(Values ret, String serviceName, Trace trace) { CompressionType compression = CompressionType.valueOf(ret.get(3).asInt8()); byte[] slimeBytes = compressor.decompress(ret.get(5).asData(), compression, ret.get(4).asInt32()); Slime slime = BinaryFormat.decode(slimeBytes); Inspector root = slime.get(); Version version = new Version(root.field(VERSION_F).asString()); byte[] payload = root.field(BLOB_F).asData(); Reply reply = null; Error error = null; if (payload.length > 0) { Object retval = decode(new Utf8Array(root.field(PROTOCOL_F).asUtf8()), version, payload); if (retval instanceof Reply) { reply = (Reply) retval; } else { error = (Error) retval; } } if (reply == null) { reply = new EmptyReply(); } if (error != null) { reply.addError(error); } reply.setRetryDelay(root.field(RETRYDELAY_F).asDouble()); Inspector errors = root.field(ERRORS_F); for (int i = 0; i < errors.entries(); i++) { Inspector e = errors.entry(i); String service = e.field(SERVICE_F).asString(); reply.addError(new Error((int)e.field(CODE_F).asLong(), e.field(MSG_F).asString(), (service != null && service.length() > 0) ? 
service : serviceName)); } if (trace.getLevel() > 0) { trace.getRoot().addChild(TraceNode.decode(root.field(TRACE_F).asString())); } return reply; } protected Params toParams(Values args) { CompressionType compression = CompressionType.valueOf(args.get(3).asInt8()); byte[] slimeBytes = compressor.decompress(args.get(5).asData(), compression, args.get(4).asInt32()); Slime slime = BinaryFormat.decode(slimeBytes); Inspector root = slime.get(); Params p = new Params(); p.version = new Version(root.field(VERSION_F).asString()); p.route = root.field(ROUTE_F).asString(); p.session = root.field(SESSION_F).asString(); p.retryEnabled = root.field(USERETRY_F).asBool(); p.retry = (int)root.field(RETRY_F).asLong(); p.timeRemaining = root.field(TIMEREMAINING_F).asLong(); p.protocolName = new Utf8Array(Utf8.toBytes(root.field(PROTOCOL_F).asString())); p.payload = root.field(BLOB_F).asData(); p.traceLevel = (int)root.field(TRACELEVEL_F).asLong(); return p; } @Override protected void createResponse(Values ret, Reply reply, Version version, byte [] payload) { ret.add(new Int8Value(CompressionType.NONE.getCode())); ret.add(new Int32Value(0)); ret.add(new DataValue(new byte[0])); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(VERSION_F, version.toString()); root.setDouble(RETRYDELAY_F, reply.getRetryDelay()); root.setString(PROTOCOL_F, reply.getProtocol().toString()); root.setData(BLOB_F, payload); if (reply.getTrace().getLevel() > 0) { root.setString(TRACE_F, reply.getTrace().getRoot().encode()); } if (reply.getNumErrors() > 0) { Cursor array = root.setArray(ERRORS_F); for (int i = 0; i < reply.getNumErrors(); i++) { Cursor e = array.addObject(); Error mbusE = reply.getError(i); e.setLong(CODE_F, mbusE.getCode()); e.setString(MSG_F, mbusE.getMessage()); if (mbusE.getService() != null) { e.setString(SERVICE_F, mbusE.getService()); } } } byte[] serializedSlime = BinaryFormat.encode(slime); Compressor.Compression compressionResult = 
compressor.compress(serializedSlime); ret.add(new Int8Value(compressionResult.type().getCode())); ret.add(new Int32Value(compressionResult.uncompressedSize())); ret.add(new DataValue(compressionResult.data())); } }
Why do we need to sleep 1 second?
public void testRedeployWillPurgeOldNonActiveDeployments() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir() .getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester("src/test/apps/app", configserverConfig, clock); tester.deployApp("myapp", Instant.now()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); try { Thread.sleep(1000);} catch (InterruptedException e) { /* Ignored */ } LocalSession deployment3session = ((Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId()); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); Optional<com.yahoo.config.provision.Deployment> deployment4 = tester.redeployFromLocalActive(); assertTrue(deployment4.isPresent()); deployment4.get().activate(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(2, sessions.size()); final Set<Long> sessionIds = sessions.stream().map(Session::getSessionId).collect(Collectors.toSet()); assertTrue(sessionIds.contains(3L)); assertTrue(sessionIds.contains(5L)); }
try { Thread.sleep(1000);} catch (InterruptedException e) { /* Ignored */ }
public void testRedeployWillPurgeOldNonActiveDeployments() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir() .getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester("src/test/apps/app", configserverConfig, clock); tester.deployApp("myapp", Instant.now()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId()); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); Optional<com.yahoo.config.provision.Deployment> deployment4 = tester.redeployFromLocalActive(); assertTrue(deployment4.isPresent()); deployment4.get().activate(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(2, sessions.size()); final Set<Long> sessionIds = sessions.stream().map(Session::getSessionId).collect(Collectors.toSet()); assertTrue(sessionIds.contains(3L)); assertTrue(sessionIds.contains(5L)); }
class RedeployTest { @Test public void testRedeploy() throws InterruptedException, IOException { DeployTester tester = new DeployTester("src/test/apps/app"); tester.deployApp("myapp", Instant.now()); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(); assertTrue(deployment.isPresent()); long activeSessionIdBefore = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdBefore).getApplicationId()); deployment.get().prepare(); deployment.get().activate(); long activeSessionIdAfter = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(activeSessionIdAfter, activeSessionIdBefore + 1); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdAfter).getApplicationId()); } /** No deployment is done because there is no local active session. */ @Test public void testNoRedeploy() { List<ModelFactory> modelFactories = new ArrayList<>(); modelFactories.add(DeployTester.createModelFactory(Clock.systemUTC())); modelFactories.add(DeployTester.createFailingModelFactory(Version.fromIntValues(1, 0, 0))); DeployTester tester = new DeployTester("ignored/app/path", modelFactories); ApplicationId id = ApplicationId.from(TenantName.from("default"), ApplicationName.from("default"), InstanceName.from("default")); assertFalse(tester.redeployFromLocalActive(id).isPresent()); } @Test }
class RedeployTest { @Test public void testRedeploy() throws InterruptedException, IOException { DeployTester tester = new DeployTester("src/test/apps/app"); tester.deployApp("myapp", Instant.now()); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(); assertTrue(deployment.isPresent()); long activeSessionIdBefore = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdBefore).getApplicationId()); deployment.get().prepare(); deployment.get().activate(); long activeSessionIdAfter = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(activeSessionIdAfter, activeSessionIdBefore + 1); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdAfter).getApplicationId()); } /** No deployment is done because there is no local active session. */ @Test public void testNoRedeploy() { List<ModelFactory> modelFactories = new ArrayList<>(); modelFactories.add(DeployTester.createModelFactory(Clock.systemUTC())); modelFactories.add(DeployTester.createFailingModelFactory(Version.fromIntValues(1, 0, 0))); DeployTester tester = new DeployTester("ignored/app/path", modelFactories); ApplicationId id = ApplicationId.from(TenantName.from("default"), ApplicationName.from("default"), InstanceName.from("default")); assertFalse(tester.redeployFromLocalActive(id).isPresent()); } @Test }
We don't have to, just added while writing the test, will remove
public void testRedeployWillPurgeOldNonActiveDeployments() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir() .getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester("src/test/apps/app", configserverConfig, clock); tester.deployApp("myapp", Instant.now()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); try { Thread.sleep(1000);} catch (InterruptedException e) { /* Ignored */ } LocalSession deployment3session = ((Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId()); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); Optional<com.yahoo.config.provision.Deployment> deployment4 = tester.redeployFromLocalActive(); assertTrue(deployment4.isPresent()); deployment4.get().activate(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(2, sessions.size()); final Set<Long> sessionIds = sessions.stream().map(Session::getSessionId).collect(Collectors.toSet()); assertTrue(sessionIds.contains(3L)); assertTrue(sessionIds.contains(5L)); }
try { Thread.sleep(1000);} catch (InterruptedException e) { /* Ignored */ }
public void testRedeployWillPurgeOldNonActiveDeployments() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir() .getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester("src/test/apps/app", configserverConfig, clock); tester.deployApp("myapp", Instant.now()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId()); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); Optional<com.yahoo.config.provision.Deployment> deployment4 = tester.redeployFromLocalActive(); assertTrue(deployment4.isPresent()); deployment4.get().activate(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(2, sessions.size()); final Set<Long> sessionIds = sessions.stream().map(Session::getSessionId).collect(Collectors.toSet()); assertTrue(sessionIds.contains(3L)); assertTrue(sessionIds.contains(5L)); }
class RedeployTest { @Test public void testRedeploy() throws InterruptedException, IOException { DeployTester tester = new DeployTester("src/test/apps/app"); tester.deployApp("myapp", Instant.now()); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(); assertTrue(deployment.isPresent()); long activeSessionIdBefore = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdBefore).getApplicationId()); deployment.get().prepare(); deployment.get().activate(); long activeSessionIdAfter = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(activeSessionIdAfter, activeSessionIdBefore + 1); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdAfter).getApplicationId()); } /** No deployment is done because there is no local active session. */ @Test public void testNoRedeploy() { List<ModelFactory> modelFactories = new ArrayList<>(); modelFactories.add(DeployTester.createModelFactory(Clock.systemUTC())); modelFactories.add(DeployTester.createFailingModelFactory(Version.fromIntValues(1, 0, 0))); DeployTester tester = new DeployTester("ignored/app/path", modelFactories); ApplicationId id = ApplicationId.from(TenantName.from("default"), ApplicationName.from("default"), InstanceName.from("default")); assertFalse(tester.redeployFromLocalActive(id).isPresent()); } @Test }
class RedeployTest { @Test public void testRedeploy() throws InterruptedException, IOException { DeployTester tester = new DeployTester("src/test/apps/app"); tester.deployApp("myapp", Instant.now()); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(); assertTrue(deployment.isPresent()); long activeSessionIdBefore = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdBefore).getApplicationId()); deployment.get().prepare(); deployment.get().activate(); long activeSessionIdAfter = tester.tenant().getLocalSessionRepo().getActiveSession(tester.applicationId()).getSessionId(); assertEquals(activeSessionIdAfter, activeSessionIdBefore + 1); assertEquals(tester.applicationId(), tester.tenant().getLocalSessionRepo().getSession(activeSessionIdAfter).getApplicationId()); } /** No deployment is done because there is no local active session. */ @Test public void testNoRedeploy() { List<ModelFactory> modelFactories = new ArrayList<>(); modelFactories.add(DeployTester.createModelFactory(Clock.systemUTC())); modelFactories.add(DeployTester.createFailingModelFactory(Version.fromIntValues(1, 0, 0))); DeployTester tester = new DeployTester("ignored/app/path", modelFactories); ApplicationId id = ApplicationId.from(TenantName.from("default"), ApplicationName.from("default"), InstanceName.from("default")); assertFalse(tester.redeployFromLocalActive(id).isPresent()); } @Test }
We should not call the builtin Object::notifyAll to avoid exposing implementation details.
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } synchronized (classLocking) { classLocking.notifyAll(); } signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.info(objectToString() + ": Releasing lock"); if (classLock != null) { classLock.close(); } log.info(objectToString() + ": Stop complete"); }
classLocking.notifyAll();
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private ClassLock classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = classLocking.tryLock(NodeAdminStateUpdater.class, () -> !terminated.get()); } catch (LockInterruptException e) { return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, 
TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
This should be inside the if-block
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } synchronized (classLocking) { classLocking.notifyAll(); } signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.info(objectToString() + ": Releasing lock"); if (classLock != null) { classLock.close(); } log.info(objectToString() + ": Stop complete"); }
log.info(objectToString() + ": Releasing lock");
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private ClassLock classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = classLocking.tryLock(NodeAdminStateUpdater.class, () -> !terminated.get()); } catch (LockInterruptException e) { return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, 
TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
If the classLock is optional I suggest we make that clearer by using Optional
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } synchronized (classLocking) { classLocking.notifyAll(); } signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.info(objectToString() + ": Releasing lock"); if (classLock != null) { classLock.close(); } log.info(objectToString() + ": Stop complete"); }
if (classLock != null) {
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private ClassLock classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = classLocking.tryLock(NodeAdminStateUpdater.class, () -> !terminated.get()); } catch (LockInterruptException e) { return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, 
TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
Fixed.
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } synchronized (classLocking) { classLocking.notifyAll(); } signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.info(objectToString() + ": Releasing lock"); if (classLock != null) { classLock.close(); } log.info(objectToString() + ": Stop complete"); }
log.info(objectToString() + ": Releasing lock");
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private ClassLock classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = classLocking.tryLock(NodeAdminStateUpdater.class, () -> !terminated.get()); } catch (LockInterruptException e) { return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, 
TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
Fixed.
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } synchronized (classLocking) { classLocking.notifyAll(); } signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); log.info(objectToString() + ": Releasing lock"); if (classLock != null) { classLock.close(); } log.info(objectToString() + ": Stop complete"); }
classLocking.notifyAll();
public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private ClassLock classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = classLocking.tryLock(NodeAdminStateUpdater.class, () -> !terminated.get()); } catch (LockInterruptException e) { return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> updateHardwareDivergence(storageMaintainer), 5, 60, 
TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("NodeAdmin", nodeAdmin.debugInfo()); debug.put("Wanted State: ", wantedState); debug.put("Current State: ", currentState); } return debug; } private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { String hardwareDivergence = maintainer.getHardwareDivergence(); NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == RESUMED) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) { return; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } }
Port number changes might cause issues during version upgrade, where some nodes run with old vespa version (using old config model) and some nodes run with new vespa version. e.g. health check trying to use wrong port will see node as down.
public int getHttpPort() { return getRelativePort(2); }
return getRelativePort(2);
public int getHttpPort() { return getRelativePort(4); }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<SearchNode> { private final String name; private final NodeSpec nodeSpec; private final String clusterName; private final ContentNode contentNode; private final boolean flushOnShutdown; private final Optional<Tuning> tuning; public Builder(String name, NodeSpec nodeSpec, String clusterName, ContentNode node, boolean flushOnShutdown, Optional<Tuning> tuning) { this.name = name; this.nodeSpec = nodeSpec; this.clusterName = clusterName; this.contentNode = node; this.flushOnShutdown = flushOnShutdown; this.tuning = tuning; } @Override protected SearchNode doBuild(AbstractConfigProducer ancestor, Element producerSpec) { return new SearchNode(ancestor, name, contentNode.getDistributionKey(), nodeSpec, clusterName, contentNode, flushOnShutdown, tuning); } }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<SearchNode> { private final String name; private final NodeSpec nodeSpec; private final String clusterName; private final ContentNode contentNode; private final boolean flushOnShutdown; private final Optional<Tuning> tuning; public Builder(String name, NodeSpec nodeSpec, String clusterName, ContentNode node, boolean flushOnShutdown, Optional<Tuning> tuning) { this.name = name; this.nodeSpec = nodeSpec; this.clusterName = clusterName; this.contentNode = node; this.flushOnShutdown = flushOnShutdown; this.tuning = tuning; } @Override protected SearchNode doBuild(AbstractConfigProducer ancestor, Element producerSpec) { return new SearchNode(ancestor, name, contentNode.getDistributionKey(), nodeSpec, clusterName, contentNode, flushOnShutdown, tuning); } }
Use Duration.ofSeconds?
private void configure(FleetcontrollerConfig config) { options.clusterName = config.cluster_name(); options.fleetControllerIndex = config.index(); options.fleetControllerCount = config.fleet_controller_count(); options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000); options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000); options.stateGatherCount = config.state_gather_count(); options.rpcPort = config.rpc_port(); options.httpPort = config.http_port(); options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time()); options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time()); options.maxInitProgressTime = config.init_progress_time(); options.statePollingFrequency = config.state_polling_frequency(); options.maxPrematureCrashes = config.max_premature_crashes(); options.stableStateTimePeriod = config.stable_state_time_period(); options.eventLogMaxSize = config.event_log_max_size(); options.eventNodeLogMaxSize = config.event_node_log_max_size(); options.minDistributorNodesUp = config.min_distributors_up_count(); options.minStorageNodesUp = config.min_storage_up_count(); options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio(); options.minRatioOfStorageNodesUp = config.min_storage_up_ratio(); options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000); options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000); options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000); options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log(); options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates(); options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000); options.distributionBits = config.ideal_distribution_bits(); options.minNodeRatioPerGroup = 
config.min_node_ratio_per_group(); options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000))); }
options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000)));
private void configure(FleetcontrollerConfig config) { options.clusterName = config.cluster_name(); options.fleetControllerIndex = config.index(); options.fleetControllerCount = config.fleet_controller_count(); options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000); options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000); options.stateGatherCount = config.state_gather_count(); options.rpcPort = config.rpc_port(); options.httpPort = config.http_port(); options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time()); options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time()); options.maxInitProgressTime = config.init_progress_time(); options.statePollingFrequency = config.state_polling_frequency(); options.maxPrematureCrashes = config.max_premature_crashes(); options.stableStateTimePeriod = config.stable_state_time_period(); options.eventLogMaxSize = config.event_log_max_size(); options.eventNodeLogMaxSize = config.event_node_log_max_size(); options.minDistributorNodesUp = config.min_distributors_up_count(); options.minStorageNodesUp = config.min_storage_up_count(); options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio(); options.minRatioOfStorageNodesUp = config.min_storage_up_ratio(); options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000); options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000); options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000); options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log(); options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates(); options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000); options.distributionBits = config.ideal_distribution_bits(); options.minNodeRatioPerGroup = 
config.min_node_ratio_per_group(); options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000))); }
class ClusterControllerClusterConfigurer { private final FleetControllerOptions options = new FleetControllerOptions(null); public ClusterControllerClusterConfigurer(ClusterController controller, StorDistributionConfig distributionConfig, FleetcontrollerConfig fleetcontrollerConfig, SlobroksConfig slobroksConfig, ZookeepersConfig zookeepersConfig, Metric metricImpl) throws Exception { configure(distributionConfig); configure(fleetcontrollerConfig); configure(slobroksConfig); configure(zookeepersConfig); checkIfZooKeeperNeeded(); if (controller != null) { controller.setOptions(options.clusterName, options, metricImpl); } } public FleetControllerOptions getOptions() { return options; } private void configure(StorDistributionConfig config) { options.setStorageDistribution(new Distribution(config)); } private void configure(SlobroksConfig config) { String specs[] = new String[config.slobrok().size()]; for (int i = 0; i < config.slobrok().size(); i++) { specs[i] = config.slobrok().get(i).connectionspec(); } options.slobrokConnectionSpecs = specs; } private void configure(ZookeepersConfig config) { options.zooKeeperServerAddress = config.zookeeperserverlist(); } private void checkIfZooKeeperNeeded() { if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) { if (options.fleetControllerCount > 1) { throw new IllegalArgumentException( "Must set zookeeper server with multiple fleetcontrollers"); } else { options.zooKeeperServerAddress = null; } } } }
class ClusterControllerClusterConfigurer { private final FleetControllerOptions options = new FleetControllerOptions(null); public ClusterControllerClusterConfigurer(ClusterController controller, StorDistributionConfig distributionConfig, FleetcontrollerConfig fleetcontrollerConfig, SlobroksConfig slobroksConfig, ZookeepersConfig zookeepersConfig, Metric metricImpl) throws Exception { configure(distributionConfig); configure(fleetcontrollerConfig); configure(slobroksConfig); configure(zookeepersConfig); checkIfZooKeeperNeeded(); if (controller != null) { controller.setOptions(options.clusterName, options, metricImpl); } } public FleetControllerOptions getOptions() { return options; } private void configure(StorDistributionConfig config) { options.setStorageDistribution(new Distribution(config)); } private void configure(SlobroksConfig config) { String specs[] = new String[config.slobrok().size()]; for (int i = 0; i < config.slobrok().size(); i++) { specs[i] = config.slobrok().get(i).connectionspec(); } options.slobrokConnectionSpecs = specs; } private void configure(ZookeepersConfig config) { options.zooKeeperServerAddress = config.zookeeperserverlist(); } private void checkIfZooKeeperNeeded() { if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) { if (options.fleetControllerCount > 1) { throw new IllegalArgumentException( "Must set zookeeper server with multiple fleetcontrollers"); } else { options.zooKeeperServerAddress = null; } } } }
`Duration.ofSeconds` expects discrete seconds as a `long`, but config is a double. Using `ofMillis` lets people specify e.g. a 0.5 second wait time and have it be converted to 500ms internally. Precision loss either way, but allows for sub-second, at least.
private void configure(FleetcontrollerConfig config) { options.clusterName = config.cluster_name(); options.fleetControllerIndex = config.index(); options.fleetControllerCount = config.fleet_controller_count(); options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000); options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000); options.stateGatherCount = config.state_gather_count(); options.rpcPort = config.rpc_port(); options.httpPort = config.http_port(); options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time()); options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time()); options.maxInitProgressTime = config.init_progress_time(); options.statePollingFrequency = config.state_polling_frequency(); options.maxPrematureCrashes = config.max_premature_crashes(); options.stableStateTimePeriod = config.stable_state_time_period(); options.eventLogMaxSize = config.event_log_max_size(); options.eventNodeLogMaxSize = config.event_node_log_max_size(); options.minDistributorNodesUp = config.min_distributors_up_count(); options.minStorageNodesUp = config.min_storage_up_count(); options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio(); options.minRatioOfStorageNodesUp = config.min_storage_up_ratio(); options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000); options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000); options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000); options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log(); options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates(); options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000); options.distributionBits = config.ideal_distribution_bits(); options.minNodeRatioPerGroup = 
config.min_node_ratio_per_group(); options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000))); }
options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000)));
private void configure(FleetcontrollerConfig config) { options.clusterName = config.cluster_name(); options.fleetControllerIndex = config.index(); options.fleetControllerCount = config.fleet_controller_count(); options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000); options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000); options.stateGatherCount = config.state_gather_count(); options.rpcPort = config.rpc_port(); options.httpPort = config.http_port(); options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time()); options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time()); options.maxInitProgressTime = config.init_progress_time(); options.statePollingFrequency = config.state_polling_frequency(); options.maxPrematureCrashes = config.max_premature_crashes(); options.stableStateTimePeriod = config.stable_state_time_period(); options.eventLogMaxSize = config.event_log_max_size(); options.eventNodeLogMaxSize = config.event_node_log_max_size(); options.minDistributorNodesUp = config.min_distributors_up_count(); options.minStorageNodesUp = config.min_storage_up_count(); options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio(); options.minRatioOfStorageNodesUp = config.min_storage_up_ratio(); options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000); options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000); options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000); options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log(); options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates(); options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000); options.distributionBits = config.ideal_distribution_bits(); options.minNodeRatioPerGroup = 
config.min_node_ratio_per_group(); options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000))); }
class ClusterControllerClusterConfigurer { private final FleetControllerOptions options = new FleetControllerOptions(null); public ClusterControllerClusterConfigurer(ClusterController controller, StorDistributionConfig distributionConfig, FleetcontrollerConfig fleetcontrollerConfig, SlobroksConfig slobroksConfig, ZookeepersConfig zookeepersConfig, Metric metricImpl) throws Exception { configure(distributionConfig); configure(fleetcontrollerConfig); configure(slobroksConfig); configure(zookeepersConfig); checkIfZooKeeperNeeded(); if (controller != null) { controller.setOptions(options.clusterName, options, metricImpl); } } public FleetControllerOptions getOptions() { return options; } private void configure(StorDistributionConfig config) { options.setStorageDistribution(new Distribution(config)); } private void configure(SlobroksConfig config) { String specs[] = new String[config.slobrok().size()]; for (int i = 0; i < config.slobrok().size(); i++) { specs[i] = config.slobrok().get(i).connectionspec(); } options.slobrokConnectionSpecs = specs; } private void configure(ZookeepersConfig config) { options.zooKeeperServerAddress = config.zookeeperserverlist(); } private void checkIfZooKeeperNeeded() { if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) { if (options.fleetControllerCount > 1) { throw new IllegalArgumentException( "Must set zookeeper server with multiple fleetcontrollers"); } else { options.zooKeeperServerAddress = null; } } } }
class ClusterControllerClusterConfigurer { private final FleetControllerOptions options = new FleetControllerOptions(null); public ClusterControllerClusterConfigurer(ClusterController controller, StorDistributionConfig distributionConfig, FleetcontrollerConfig fleetcontrollerConfig, SlobroksConfig slobroksConfig, ZookeepersConfig zookeepersConfig, Metric metricImpl) throws Exception { configure(distributionConfig); configure(fleetcontrollerConfig); configure(slobroksConfig); configure(zookeepersConfig); checkIfZooKeeperNeeded(); if (controller != null) { controller.setOptions(options.clusterName, options, metricImpl); } } public FleetControllerOptions getOptions() { return options; } private void configure(StorDistributionConfig config) { options.setStorageDistribution(new Distribution(config)); } private void configure(SlobroksConfig config) { String specs[] = new String[config.slobrok().size()]; for (int i = 0; i < config.slobrok().size(); i++) { specs[i] = config.slobrok().get(i).connectionspec(); } options.slobrokConnectionSpecs = specs; } private void configure(ZookeepersConfig config) { options.zooKeeperServerAddress = config.zookeeperserverlist(); } private void checkIfZooKeeperNeeded() { if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) { if (options.fleetControllerCount > 1) { throw new IllegalArgumentException( "Must set zookeeper server with multiple fleetcontrollers"); } else { options.zooKeeperServerAddress = null; } } } }
Ah, I thought max_deferred_task_version_wait_time_sec was an integral type.
private void configure(FleetcontrollerConfig config) { options.clusterName = config.cluster_name(); options.fleetControllerIndex = config.index(); options.fleetControllerCount = config.fleet_controller_count(); options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000); options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000); options.stateGatherCount = config.state_gather_count(); options.rpcPort = config.rpc_port(); options.httpPort = config.http_port(); options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time()); options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time()); options.maxInitProgressTime = config.init_progress_time(); options.statePollingFrequency = config.state_polling_frequency(); options.maxPrematureCrashes = config.max_premature_crashes(); options.stableStateTimePeriod = config.stable_state_time_period(); options.eventLogMaxSize = config.event_log_max_size(); options.eventNodeLogMaxSize = config.event_node_log_max_size(); options.minDistributorNodesUp = config.min_distributors_up_count(); options.minStorageNodesUp = config.min_storage_up_count(); options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio(); options.minRatioOfStorageNodesUp = config.min_storage_up_ratio(); options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000); options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000); options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000); options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log(); options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates(); options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000); options.distributionBits = config.ideal_distribution_bits(); options.minNodeRatioPerGroup = 
config.min_node_ratio_per_group(); options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000))); }
options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000)));
private void configure(FleetcontrollerConfig config) { options.clusterName = config.cluster_name(); options.fleetControllerIndex = config.index(); options.fleetControllerCount = config.fleet_controller_count(); options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000); options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000); options.stateGatherCount = config.state_gather_count(); options.rpcPort = config.rpc_port(); options.httpPort = config.http_port(); options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time()); options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time()); options.maxInitProgressTime = config.init_progress_time(); options.statePollingFrequency = config.state_polling_frequency(); options.maxPrematureCrashes = config.max_premature_crashes(); options.stableStateTimePeriod = config.stable_state_time_period(); options.eventLogMaxSize = config.event_log_max_size(); options.eventNodeLogMaxSize = config.event_node_log_max_size(); options.minDistributorNodesUp = config.min_distributors_up_count(); options.minStorageNodesUp = config.min_storage_up_count(); options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio(); options.minRatioOfStorageNodesUp = config.min_storage_up_ratio(); options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000); options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000); options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000); options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log(); options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates(); options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000); options.distributionBits = config.ideal_distribution_bits(); options.minNodeRatioPerGroup = 
config.min_node_ratio_per_group(); options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000))); }
class ClusterControllerClusterConfigurer { private final FleetControllerOptions options = new FleetControllerOptions(null); public ClusterControllerClusterConfigurer(ClusterController controller, StorDistributionConfig distributionConfig, FleetcontrollerConfig fleetcontrollerConfig, SlobroksConfig slobroksConfig, ZookeepersConfig zookeepersConfig, Metric metricImpl) throws Exception { configure(distributionConfig); configure(fleetcontrollerConfig); configure(slobroksConfig); configure(zookeepersConfig); checkIfZooKeeperNeeded(); if (controller != null) { controller.setOptions(options.clusterName, options, metricImpl); } } public FleetControllerOptions getOptions() { return options; } private void configure(StorDistributionConfig config) { options.setStorageDistribution(new Distribution(config)); } private void configure(SlobroksConfig config) { String specs[] = new String[config.slobrok().size()]; for (int i = 0; i < config.slobrok().size(); i++) { specs[i] = config.slobrok().get(i).connectionspec(); } options.slobrokConnectionSpecs = specs; } private void configure(ZookeepersConfig config) { options.zooKeeperServerAddress = config.zookeeperserverlist(); } private void checkIfZooKeeperNeeded() { if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) { if (options.fleetControllerCount > 1) { throw new IllegalArgumentException( "Must set zookeeper server with multiple fleetcontrollers"); } else { options.zooKeeperServerAddress = null; } } } }
class ClusterControllerClusterConfigurer { private final FleetControllerOptions options = new FleetControllerOptions(null); public ClusterControllerClusterConfigurer(ClusterController controller, StorDistributionConfig distributionConfig, FleetcontrollerConfig fleetcontrollerConfig, SlobroksConfig slobroksConfig, ZookeepersConfig zookeepersConfig, Metric metricImpl) throws Exception { configure(distributionConfig); configure(fleetcontrollerConfig); configure(slobroksConfig); configure(zookeepersConfig); checkIfZooKeeperNeeded(); if (controller != null) { controller.setOptions(options.clusterName, options, metricImpl); } } public FleetControllerOptions getOptions() { return options; } private void configure(StorDistributionConfig config) { options.setStorageDistribution(new Distribution(config)); } private void configure(SlobroksConfig config) { String specs[] = new String[config.slobrok().size()]; for (int i = 0; i < config.slobrok().size(); i++) { specs[i] = config.slobrok().get(i).connectionspec(); } options.slobrokConnectionSpecs = specs; } private void configure(ZookeepersConfig config) { options.zooKeeperServerAddress = config.zookeeperserverlist(); } private void checkIfZooKeeperNeeded() { if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) { if (options.fleetControllerCount > 1) { throw new IllegalArgumentException( "Must set zookeeper server with multiple fleetcontrollers"); } else { options.zooKeeperServerAddress = null; } } } }
Won't this cause WARNING-spam in CD, if all integration tests end up triggering this?
private List<BuildJob> getJobs(boolean removeFromQueue) { try (Lock lock = curator.lockJobQueues()) { List<BuildJob> jobsToRun = new ArrayList<>(); for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); for (ApplicationId a : queue) { ApplicationId application = removeFromQueue ? queue.poll() : a; Optional<Long> projectId = projectId(application); if (projectId.isPresent()) { jobsToRun.add(new BuildJob(projectId.get(), jobType.id())); } else { log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() + " because project ID is missing"); } if (removeFromQueue && isCapacityConstrained(jobType)) break; } if (removeFromQueue) curator.writeJobQueue(jobType, queue); } return Collections.unmodifiableList(jobsToRun); } }
log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() +
private List<BuildJob> getJobs(boolean removeFromQueue) { try (Lock lock = curator.lockJobQueues()) { List<BuildJob> jobsToRun = new ArrayList<>(); for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); for (ApplicationId a : queue) { ApplicationId application = removeFromQueue ? queue.poll() : a; Optional<Long> projectId = projectId(application); if (projectId.isPresent()) { jobsToRun.add(new BuildJob(projectId.get(), jobType.id())); } else { log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() + " because project ID is missing"); } if (removeFromQueue && isCapacityConstrained(jobType)) break; } if (removeFromQueue) curator.writeJobQueue(jobType, queue); } return Collections.unmodifiableList(jobsToRun); } }
class PolledBuildSystem implements BuildSystem { private static final Logger log = Logger.getLogger(PolledBuildSystem.class.getName()); private final Controller controller; private final CuratorDb curator; public PolledBuildSystem(Controller controller, CuratorDb curator) { this.controller = controller; this.curator = curator; } @Override public void addJob(ApplicationId application, JobType jobType, boolean first) { try (Lock lock = curator.lockJobQueues()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); if ( ! queue.contains(application)) { if (first) { queue.addFirst(application); } else { queue.add(application); } } curator.writeJobQueue(jobType, queue); } } @Override public List<BuildJob> jobs() { return getJobs(false); } @Override public List<BuildJob> takeJobsToRun() { return getJobs(true); } @Override public void removeJobs(ApplicationId application) { try (Lock lock = curator.lockJobQueues()) { for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); while (queue.remove(application)) { } curator.writeJobQueue(jobType, queue); } } } private Optional<Long> projectId(ApplicationId applicationId) { return controller.applications().require(applicationId).deploymentJobs().projectId(); } private static boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } }
class PolledBuildSystem implements BuildSystem { private static final Logger log = Logger.getLogger(PolledBuildSystem.class.getName()); private final Controller controller; private final CuratorDb curator; public PolledBuildSystem(Controller controller, CuratorDb curator) { this.controller = controller; this.curator = curator; } @Override public void addJob(ApplicationId application, JobType jobType, boolean first) { try (Lock lock = curator.lockJobQueues()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); if ( ! queue.contains(application)) { if (first) { queue.addFirst(application); } else { queue.add(application); } } curator.writeJobQueue(jobType, queue); } } @Override public List<BuildJob> jobs() { return getJobs(false); } @Override public List<BuildJob> takeJobsToRun() { return getJobs(true); } @Override public void removeJobs(ApplicationId application) { try (Lock lock = curator.lockJobQueues()) { for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); while (queue.remove(application)) { } curator.writeJobQueue(jobType, queue); } } } private Optional<Long> projectId(ApplicationId applicationId) { return controller.applications().require(applicationId).deploymentJobs().projectId(); } private static boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } }
No, because they are self-triggering. Basically, this only happens for applications that are not self-triggering and are missing project ID, and if that happens I want some noise in the logs. :-)
private List<BuildJob> getJobs(boolean removeFromQueue) { try (Lock lock = curator.lockJobQueues()) { List<BuildJob> jobsToRun = new ArrayList<>(); for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); for (ApplicationId a : queue) { ApplicationId application = removeFromQueue ? queue.poll() : a; Optional<Long> projectId = projectId(application); if (projectId.isPresent()) { jobsToRun.add(new BuildJob(projectId.get(), jobType.id())); } else { log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() + " because project ID is missing"); } if (removeFromQueue && isCapacityConstrained(jobType)) break; } if (removeFromQueue) curator.writeJobQueue(jobType, queue); } return Collections.unmodifiableList(jobsToRun); } }
log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() +
private List<BuildJob> getJobs(boolean removeFromQueue) { try (Lock lock = curator.lockJobQueues()) { List<BuildJob> jobsToRun = new ArrayList<>(); for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); for (ApplicationId a : queue) { ApplicationId application = removeFromQueue ? queue.poll() : a; Optional<Long> projectId = projectId(application); if (projectId.isPresent()) { jobsToRun.add(new BuildJob(projectId.get(), jobType.id())); } else { log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() + " because project ID is missing"); } if (removeFromQueue && isCapacityConstrained(jobType)) break; } if (removeFromQueue) curator.writeJobQueue(jobType, queue); } return Collections.unmodifiableList(jobsToRun); } }
class PolledBuildSystem implements BuildSystem { private static final Logger log = Logger.getLogger(PolledBuildSystem.class.getName()); private final Controller controller; private final CuratorDb curator; public PolledBuildSystem(Controller controller, CuratorDb curator) { this.controller = controller; this.curator = curator; } @Override public void addJob(ApplicationId application, JobType jobType, boolean first) { try (Lock lock = curator.lockJobQueues()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); if ( ! queue.contains(application)) { if (first) { queue.addFirst(application); } else { queue.add(application); } } curator.writeJobQueue(jobType, queue); } } @Override public List<BuildJob> jobs() { return getJobs(false); } @Override public List<BuildJob> takeJobsToRun() { return getJobs(true); } @Override public void removeJobs(ApplicationId application) { try (Lock lock = curator.lockJobQueues()) { for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); while (queue.remove(application)) { } curator.writeJobQueue(jobType, queue); } } } private Optional<Long> projectId(ApplicationId applicationId) { return controller.applications().require(applicationId).deploymentJobs().projectId(); } private static boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } }
class PolledBuildSystem implements BuildSystem { private static final Logger log = Logger.getLogger(PolledBuildSystem.class.getName()); private final Controller controller; private final CuratorDb curator; public PolledBuildSystem(Controller controller, CuratorDb curator) { this.controller = controller; this.curator = curator; } @Override public void addJob(ApplicationId application, JobType jobType, boolean first) { try (Lock lock = curator.lockJobQueues()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); if ( ! queue.contains(application)) { if (first) { queue.addFirst(application); } else { queue.add(application); } } curator.writeJobQueue(jobType, queue); } } @Override public List<BuildJob> jobs() { return getJobs(false); } @Override public List<BuildJob> takeJobsToRun() { return getJobs(true); } @Override public void removeJobs(ApplicationId application) { try (Lock lock = curator.lockJobQueues()) { for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); while (queue.remove(application)) { } curator.writeJobQueue(jobType, queue); } } } private Optional<Long> projectId(ApplicationId applicationId) { return controller.applications().require(applicationId).deploymentJobs().projectId(); } private static boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } }
Sounds perfectly reasonable. Question answered :]
private List<BuildJob> getJobs(boolean removeFromQueue) { try (Lock lock = curator.lockJobQueues()) { List<BuildJob> jobsToRun = new ArrayList<>(); for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); for (ApplicationId a : queue) { ApplicationId application = removeFromQueue ? queue.poll() : a; Optional<Long> projectId = projectId(application); if (projectId.isPresent()) { jobsToRun.add(new BuildJob(projectId.get(), jobType.id())); } else { log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() + " because project ID is missing"); } if (removeFromQueue && isCapacityConstrained(jobType)) break; } if (removeFromQueue) curator.writeJobQueue(jobType, queue); } return Collections.unmodifiableList(jobsToRun); } }
log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() +
private List<BuildJob> getJobs(boolean removeFromQueue) { try (Lock lock = curator.lockJobQueues()) { List<BuildJob> jobsToRun = new ArrayList<>(); for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); for (ApplicationId a : queue) { ApplicationId application = removeFromQueue ? queue.poll() : a; Optional<Long> projectId = projectId(application); if (projectId.isPresent()) { jobsToRun.add(new BuildJob(projectId.get(), jobType.id())); } else { log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() + " because project ID is missing"); } if (removeFromQueue && isCapacityConstrained(jobType)) break; } if (removeFromQueue) curator.writeJobQueue(jobType, queue); } return Collections.unmodifiableList(jobsToRun); } }
class PolledBuildSystem implements BuildSystem { private static final Logger log = Logger.getLogger(PolledBuildSystem.class.getName()); private final Controller controller; private final CuratorDb curator; public PolledBuildSystem(Controller controller, CuratorDb curator) { this.controller = controller; this.curator = curator; } @Override public void addJob(ApplicationId application, JobType jobType, boolean first) { try (Lock lock = curator.lockJobQueues()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); if ( ! queue.contains(application)) { if (first) { queue.addFirst(application); } else { queue.add(application); } } curator.writeJobQueue(jobType, queue); } } @Override public List<BuildJob> jobs() { return getJobs(false); } @Override public List<BuildJob> takeJobsToRun() { return getJobs(true); } @Override public void removeJobs(ApplicationId application) { try (Lock lock = curator.lockJobQueues()) { for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); while (queue.remove(application)) { } curator.writeJobQueue(jobType, queue); } } } private Optional<Long> projectId(ApplicationId applicationId) { return controller.applications().require(applicationId).deploymentJobs().projectId(); } private static boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } }
class PolledBuildSystem implements BuildSystem { private static final Logger log = Logger.getLogger(PolledBuildSystem.class.getName()); private final Controller controller; private final CuratorDb curator; public PolledBuildSystem(Controller controller, CuratorDb curator) { this.controller = controller; this.curator = curator; } @Override public void addJob(ApplicationId application, JobType jobType, boolean first) { try (Lock lock = curator.lockJobQueues()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); if ( ! queue.contains(application)) { if (first) { queue.addFirst(application); } else { queue.add(application); } } curator.writeJobQueue(jobType, queue); } } @Override public List<BuildJob> jobs() { return getJobs(false); } @Override public List<BuildJob> takeJobsToRun() { return getJobs(true); } @Override public void removeJobs(ApplicationId application) { try (Lock lock = curator.lockJobQueues()) { for (JobType jobType : JobType.values()) { Deque<ApplicationId> queue = curator.readJobQueue(jobType); while (queue.remove(application)) { } curator.writeJobQueue(jobType, queue); } } } private Optional<Long> projectId(ApplicationId applicationId) { return controller.applications().require(applicationId).deploymentJobs().projectId(); } private static boolean isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } }
What's the point of this `.map()` ?
public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException { return new VisitResult(Optional.of("token"), "List of json docs, cont token " + options.continuation.map(a->a).orElse("not set") + ", doc selection: '" + documentSelection + "'" + options.wantedDocumentCount.map(n -> String.format(", min docs returned: %d", n)).orElse("")); }
+ options.continuation.map(a->a).orElse("not set") + ", doc selection: '"
public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException { return new VisitResult(Optional.of("token"), "List of json docs, cont token " + options.continuation.orElse("not set") + ", doc selection: '" + documentSelection + "'" + options.wantedDocumentCount.map(n -> String.format(", min docs returned: %d", n)).orElse("")); }
class MockedOperationHandler implements OperationHandler { StringBuilder log = new StringBuilder(); int deleteCount = 0; @Override @Override public void put(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("PUT: " + data.getDocument().getId()); log.append(data.getDocument().getBody().toString()); } @Override public void update(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("UPDATE: " + data.getDocumentUpdate().getId()); log.append(data.getDocumentUpdate().getFieldUpdates().toString()); if (data.getDocumentUpdate().getCreateIfNonExistent()) { log.append("[CREATE IF NON EXISTENT IS TRUE]"); } } @Override public void delete(RestUri restUri, String condition, Optional<String> route) throws RestApiException { deleteCount++; if (deleteCount == 2) { String theLog = log.toString(); log = new StringBuilder(); deleteCount = 0; throw new RestApiException(Response.createErrorResponse(666, theLog, RestUri.apiErrorCodes.ERROR_ID_BASIC_USAGE)); } log.append("DELETE: " + restUri.generateFullId()); } @Override public Optional<String> get(RestUri restUri) throws RestApiException { log.append("GET: " + restUri.generateFullId()); return Optional.empty(); } }
class MockedOperationHandler implements OperationHandler { StringBuilder log = new StringBuilder(); int deleteCount = 0; @Override @Override public void put(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("PUT: " + data.getDocument().getId()); log.append(data.getDocument().getBody().toString()); } @Override public void update(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("UPDATE: " + data.getDocumentUpdate().getId()); log.append(data.getDocumentUpdate().getFieldUpdates().toString()); if (data.getDocumentUpdate().getCreateIfNonExistent()) { log.append("[CREATE IF NON EXISTENT IS TRUE]"); } } @Override public void delete(RestUri restUri, String condition, Optional<String> route) throws RestApiException { deleteCount++; if (deleteCount == 2) { String theLog = log.toString(); log = new StringBuilder(); deleteCount = 0; throw new RestApiException(Response.createErrorResponse(666, theLog, RestUri.apiErrorCodes.ERROR_ID_BASIC_USAGE)); } log.append("DELETE: " + restUri.generateFullId()); } @Override public Optional<String> get(RestUri restUri) throws RestApiException { log.append("GET: " + restUri.generateFullId()); return Optional.empty(); } }
Redundant identity transform. I'll remove it.
public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException { return new VisitResult(Optional.of("token"), "List of json docs, cont token " + options.continuation.map(a->a).orElse("not set") + ", doc selection: '" + documentSelection + "'" + options.wantedDocumentCount.map(n -> String.format(", min docs returned: %d", n)).orElse("")); }
+ options.continuation.map(a->a).orElse("not set") + ", doc selection: '"
public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException { return new VisitResult(Optional.of("token"), "List of json docs, cont token " + options.continuation.orElse("not set") + ", doc selection: '" + documentSelection + "'" + options.wantedDocumentCount.map(n -> String.format(", min docs returned: %d", n)).orElse("")); }
class MockedOperationHandler implements OperationHandler { StringBuilder log = new StringBuilder(); int deleteCount = 0; @Override @Override public void put(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("PUT: " + data.getDocument().getId()); log.append(data.getDocument().getBody().toString()); } @Override public void update(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("UPDATE: " + data.getDocumentUpdate().getId()); log.append(data.getDocumentUpdate().getFieldUpdates().toString()); if (data.getDocumentUpdate().getCreateIfNonExistent()) { log.append("[CREATE IF NON EXISTENT IS TRUE]"); } } @Override public void delete(RestUri restUri, String condition, Optional<String> route) throws RestApiException { deleteCount++; if (deleteCount == 2) { String theLog = log.toString(); log = new StringBuilder(); deleteCount = 0; throw new RestApiException(Response.createErrorResponse(666, theLog, RestUri.apiErrorCodes.ERROR_ID_BASIC_USAGE)); } log.append("DELETE: " + restUri.generateFullId()); } @Override public Optional<String> get(RestUri restUri) throws RestApiException { log.append("GET: " + restUri.generateFullId()); return Optional.empty(); } }
class MockedOperationHandler implements OperationHandler { StringBuilder log = new StringBuilder(); int deleteCount = 0; @Override @Override public void put(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("PUT: " + data.getDocument().getId()); log.append(data.getDocument().getBody().toString()); } @Override public void update(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException { log.append("UPDATE: " + data.getDocumentUpdate().getId()); log.append(data.getDocumentUpdate().getFieldUpdates().toString()); if (data.getDocumentUpdate().getCreateIfNonExistent()) { log.append("[CREATE IF NON EXISTENT IS TRUE]"); } } @Override public void delete(RestUri restUri, String condition, Optional<String> route) throws RestApiException { deleteCount++; if (deleteCount == 2) { String theLog = log.toString(); log = new StringBuilder(); deleteCount = 0; throw new RestApiException(Response.createErrorResponse(666, theLog, RestUri.apiErrorCodes.ERROR_ID_BASIC_USAGE)); } log.append("DELETE: " + restUri.generateFullId()); } @Override public Optional<String> get(RestUri restUri) throws RestApiException { log.append("GET: " + restUri.generateFullId()); return Optional.empty(); } }
ytracecleaner is not in use anymore
Optional<String> lookup(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "filedistributorservice": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": case "ytracecleaner": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "docprocservice": case "container-clustercontroller": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: log.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
case "ytracecleaner":
Optional<String> lookup(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "filedistributorservice": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "docprocservice": case "container-clustercontroller": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: log.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
class SlobrokMonitor2 implements AutoCloseable { public static final String SLOBROK_RPC_PORT_TAG = "rpc"; private static final Logger log = Logger.getLogger(SlobrokMonitor2.class.getName()); private final SlobrokList slobrokList; private final Mirror mirror; SlobrokMonitor2() { this(new SlobrokList()); } SlobrokMonitor2(SlobrokList slobrokList, Mirror mirror) { this.slobrokList = slobrokList; this.mirror = mirror; } private SlobrokMonitor2(SlobrokList slobrokList) { this(slobrokList, new Mirror(new Supervisor(new Transport()), slobrokList)); } void updateSlobrokList(SuperModel superModel) { List<String> slobrokSpecs = new ArrayList<>(); for (ApplicationInfo application : superModel.getAllApplicationInfos()) { for (HostInfo host : application.getModel().getHosts()) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (port.getTags().contains(SLOBROK_RPC_PORT_TAG)) { Spec spec = new Spec(host.getHostname(), port.getPort()); slobrokSpecs.add(spec.toString()); } } } } } slobrokList.setup(slobrokSpecs.toArray(new String[0])); } ServiceMonitorStatus getStatus(ServiceType serviceType, ConfigId configId) { Optional<String> slobrokServiceName = lookup(serviceType, configId); if (slobrokServiceName.isPresent()) { if (mirror.lookup(slobrokServiceName.get()).length != 0) { return ServiceMonitorStatus.UP; } else { return ServiceMonitorStatus.DOWN; } } else { return ServiceMonitorStatus.NOT_CHECKED; } } @Override public void close() { mirror.shutdown(); } }
class SlobrokMonitor2 implements AutoCloseable { public static final String SLOBROK_RPC_PORT_TAG = "rpc"; private static final Logger log = Logger.getLogger(SlobrokMonitor2.class.getName()); private final SlobrokList slobrokList; private final Mirror mirror; SlobrokMonitor2() { this(new SlobrokList()); } SlobrokMonitor2(SlobrokList slobrokList, Mirror mirror) { this.slobrokList = slobrokList; this.mirror = mirror; } private SlobrokMonitor2(SlobrokList slobrokList) { this(slobrokList, new Mirror(new Supervisor(new Transport()), slobrokList)); } void updateSlobrokList(SuperModel superModel) { List<String> slobrokSpecs = new ArrayList<>(); for (ApplicationInfo application : superModel.getAllApplicationInfos()) { for (HostInfo host : application.getModel().getHosts()) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (port.getTags().contains(SLOBROK_RPC_PORT_TAG)) { Spec spec = new Spec(host.getHostname(), port.getPort()); slobrokSpecs.add(spec.toString()); } } } } } slobrokList.setup(slobrokSpecs.toArray(new String[0])); } ServiceMonitorStatus getStatus(ServiceType serviceType, ConfigId configId) { Optional<String> slobrokServiceName = lookup(serviceType, configId); if (slobrokServiceName.isPresent()) { if (mirror.lookup(slobrokServiceName.get()).length != 0) { return ServiceMonitorStatus.UP; } else { return ServiceMonitorStatus.DOWN; } } else { return ServiceMonitorStatus.NOT_CHECKED; } } @Override public void close() { mirror.shutdown(); } }
Fixed
Optional<String> lookup(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "filedistributorservice": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": case "ytracecleaner": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "docprocservice": case "container-clustercontroller": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: log.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
case "ytracecleaner":
Optional<String> lookup(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "filedistributorservice": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "docprocservice": case "container-clustercontroller": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: log.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
class SlobrokMonitor2 implements AutoCloseable { public static final String SLOBROK_RPC_PORT_TAG = "rpc"; private static final Logger log = Logger.getLogger(SlobrokMonitor2.class.getName()); private final SlobrokList slobrokList; private final Mirror mirror; SlobrokMonitor2() { this(new SlobrokList()); } SlobrokMonitor2(SlobrokList slobrokList, Mirror mirror) { this.slobrokList = slobrokList; this.mirror = mirror; } private SlobrokMonitor2(SlobrokList slobrokList) { this(slobrokList, new Mirror(new Supervisor(new Transport()), slobrokList)); } void updateSlobrokList(SuperModel superModel) { List<String> slobrokSpecs = new ArrayList<>(); for (ApplicationInfo application : superModel.getAllApplicationInfos()) { for (HostInfo host : application.getModel().getHosts()) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (port.getTags().contains(SLOBROK_RPC_PORT_TAG)) { Spec spec = new Spec(host.getHostname(), port.getPort()); slobrokSpecs.add(spec.toString()); } } } } } slobrokList.setup(slobrokSpecs.toArray(new String[0])); } ServiceMonitorStatus getStatus(ServiceType serviceType, ConfigId configId) { Optional<String> slobrokServiceName = lookup(serviceType, configId); if (slobrokServiceName.isPresent()) { if (mirror.lookup(slobrokServiceName.get()).length != 0) { return ServiceMonitorStatus.UP; } else { return ServiceMonitorStatus.DOWN; } } else { return ServiceMonitorStatus.NOT_CHECKED; } } @Override public void close() { mirror.shutdown(); } }
class SlobrokMonitor2 implements AutoCloseable { public static final String SLOBROK_RPC_PORT_TAG = "rpc"; private static final Logger log = Logger.getLogger(SlobrokMonitor2.class.getName()); private final SlobrokList slobrokList; private final Mirror mirror; SlobrokMonitor2() { this(new SlobrokList()); } SlobrokMonitor2(SlobrokList slobrokList, Mirror mirror) { this.slobrokList = slobrokList; this.mirror = mirror; } private SlobrokMonitor2(SlobrokList slobrokList) { this(slobrokList, new Mirror(new Supervisor(new Transport()), slobrokList)); } void updateSlobrokList(SuperModel superModel) { List<String> slobrokSpecs = new ArrayList<>(); for (ApplicationInfo application : superModel.getAllApplicationInfos()) { for (HostInfo host : application.getModel().getHosts()) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (port.getTags().contains(SLOBROK_RPC_PORT_TAG)) { Spec spec = new Spec(host.getHostname(), port.getPort()); slobrokSpecs.add(spec.toString()); } } } } } slobrokList.setup(slobrokSpecs.toArray(new String[0])); } ServiceMonitorStatus getStatus(ServiceType serviceType, ConfigId configId) { Optional<String> slobrokServiceName = lookup(serviceType, configId); if (slobrokServiceName.isPresent()) { if (mirror.lookup(slobrokServiceName.get()).length != 0) { return ServiceMonitorStatus.UP; } else { return ServiceMonitorStatus.DOWN; } } else { return ServiceMonitorStatus.NOT_CHECKED; } } @Override public void close() { mirror.shutdown(); } }
Port number changes might cause issues during version upgrade, where some nodes run with old vespa version (using old config model) and some nodes run with new vespa version.
public int getPortCount() { return 3; }
return 3;
public int getPortCount() { return 5; }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<SearchNode> { private final String name; private final NodeSpec nodeSpec; private final String clusterName; private final ContentNode contentNode; private final boolean flushOnShutdown; private final Optional<Tuning> tuning; public Builder(String name, NodeSpec nodeSpec, String clusterName, ContentNode node, boolean flushOnShutdown, Optional<Tuning> tuning) { this.name = name; this.nodeSpec = nodeSpec; this.clusterName = clusterName; this.contentNode = node; this.flushOnShutdown = flushOnShutdown; this.tuning = tuning; } @Override protected SearchNode doBuild(AbstractConfigProducer ancestor, Element producerSpec) { return new SearchNode(ancestor, name, contentNode.getDistributionKey(), nodeSpec, clusterName, contentNode, flushOnShutdown, tuning); } }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<SearchNode> { private final String name; private final NodeSpec nodeSpec; private final String clusterName; private final ContentNode contentNode; private final boolean flushOnShutdown; private final Optional<Tuning> tuning; public Builder(String name, NodeSpec nodeSpec, String clusterName, ContentNode node, boolean flushOnShutdown, Optional<Tuning> tuning) { this.name = name; this.nodeSpec = nodeSpec; this.clusterName = clusterName; this.contentNode = node; this.flushOnShutdown = flushOnShutdown; this.tuning = tuning; } @Override protected SearchNode doBuild(AbstractConfigProducer ancestor, Element producerSpec) { return new SearchNode(ancestor, name, contentNode.getDistributionKey(), nodeSpec, clusterName, contentNode, flushOnShutdown, tuning); } }
Removal of slime messaging port should be reviewed by @havardpe.
public int getDistributionKey() { return distributionKey; }
return distributionKey;
public int getDistributionKey() { return distributionKey; }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<SearchNode> { private final String name; private final NodeSpec nodeSpec; private final String clusterName; private final ContentNode contentNode; private final boolean flushOnShutdown; private final Optional<Tuning> tuning; public Builder(String name, NodeSpec nodeSpec, String clusterName, ContentNode node, boolean flushOnShutdown, Optional<Tuning> tuning) { this.name = name; this.nodeSpec = nodeSpec; this.clusterName = clusterName; this.contentNode = node; this.flushOnShutdown = flushOnShutdown; this.tuning = tuning; } @Override protected SearchNode doBuild(AbstractConfigProducer ancestor, Element producerSpec) { return new SearchNode(ancestor, name, contentNode.getDistributionKey(), nodeSpec, clusterName, contentNode, flushOnShutdown, tuning); } }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<SearchNode> { private final String name; private final NodeSpec nodeSpec; private final String clusterName; private final ContentNode contentNode; private final boolean flushOnShutdown; private final Optional<Tuning> tuning; public Builder(String name, NodeSpec nodeSpec, String clusterName, ContentNode node, boolean flushOnShutdown, Optional<Tuning> tuning) { this.name = name; this.nodeSpec = nodeSpec; this.clusterName = clusterName; this.contentNode = node; this.flushOnShutdown = flushOnShutdown; this.tuning = tuning; } @Override protected SearchNode doBuild(AbstractConfigProducer ancestor, Element producerSpec) { return new SearchNode(ancestor, name, contentNode.getDistributionKey(), nodeSpec, clusterName, contentNode, flushOnShutdown, tuning); } }
I prefer having _filter_ and _map_ operations on separate lines to improve readability, but this is okay as the lambda operations are simple.
private Optional<String> secret(String keyname) { return Optional.of(keyname).filter(key -> !key.isEmpty()).map(secretStore::getSecret); }
return Optional.of(keyname).filter(key -> !key.isEmpty()).map(secretStore::getSecret);
private Optional<String> secret(String keyname) { return Optional.of(keyname).filter(key -> !key.isEmpty()).map(secretStore::getSecret); }
class ConnectorFactory { private final static Logger log = Logger.getLogger(ConnectorFactory.class.getName()); private final ConnectorConfig connectorConfig; private final SslKeyStoreFactory sslKeyStoreFactory; private final SecretStore secretStore; @Inject public ConnectorFactory(ConnectorConfig connectorConfig, SslKeyStoreFactory sslKeyStoreFactory, SecretStore secretStore) { this.connectorConfig = connectorConfig; this.sslKeyStoreFactory = sslKeyStoreFactory; this.secretStore = secretStore; if (connectorConfig.ssl().enabled()) validateSslConfig(connectorConfig); } private static void validateSslConfig(ConnectorConfig config) { ConnectorConfig.Ssl ssl = config.ssl(); if (ssl.keyStoreType() == JKS) { if (! ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty()) throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS."); } if (ssl.keyStoreType() == PEM) { if (! ssl.keyStorePath().isEmpty()) throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM"); } } public ConnectorConfig getConnectorConfig() { return connectorConfig; } public ServerConnector createConnector(final Metric metric, final Server server, final ServerSocketChannel ch, Map<Path, FileChannel> keyStoreChannels) { ServerConnector connector; if (connectorConfig.ssl().enabled()) { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newSslConnectionFactory(keyStoreChannels), newHttpConnectionFactory()); } else { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newHttpConnectionFactory()); } connector.setPort(connectorConfig.listenPort()); connector.setName(connectorConfig.name()); connector.setAcceptQueueSize(connectorConfig.acceptQueueSize()); connector.setReuseAddress(connectorConfig.reuseAddress()); double soLingerTimeSeconds = connectorConfig.soLingerTime(); if (soLingerTimeSeconds == -1) { connector.setSoLingerTime(-1); } else { 
connector.setSoLingerTime((int)(soLingerTimeSeconds * 1000.0)); } connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0)); connector.setStopTimeout((long)(connectorConfig.stopTimeout() * 1000.0)); return connector; } private HttpConnectionFactory newHttpConnectionFactory() { HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSendDateHeader(true); httpConfig.setSendServerVersion(false); httpConfig.setSendXPoweredBy(false); httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize()); httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize()); httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize()); httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize()); if (connectorConfig.ssl().enabled()) { httpConfig.addCustomizer(new SecureRequestCustomizer()); } return new HttpConnectionFactory(httpConfig); } private SslConnectionFactory newSslConnectionFactory(Map<Path, FileChannel> keyStoreChannels) { Ssl sslConfig = connectorConfig.ssl(); SslContextFactory factory = new SslContextFactory(); switch (sslConfig.clientAuth()) { case NEED_AUTH: factory.setNeedClientAuth(true); break; case WANT_AUTH: factory.setWantClientAuth(true); break; } if (!sslConfig.prng().isEmpty()) { factory.setSecureRandomAlgorithm(sslConfig.prng()); } if (!sslConfig.excludeProtocol().isEmpty()) { String[] prots = new String[sslConfig.excludeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.excludeProtocol(i).name(); } factory.setExcludeProtocols(prots); } if (!sslConfig.includeProtocol().isEmpty()) { String[] prots = new String[sslConfig.includeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.includeProtocol(i).name(); } factory.setIncludeProtocols(prots); } if (!sslConfig.excludeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.excludeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.excludeCipherSuite(i).name(); } 
factory.setExcludeCipherSuites(ciphs); } if (!sslConfig.includeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.includeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.includeCipherSuite(i).name(); } factory.setIncludeCipherSuites(ciphs); } Optional<String> keyDbPassword = secret(sslConfig.keyDbKey()); switch (sslConfig.keyStoreType()) { case PEM: factory.setKeyStore(getKeyStore(sslConfig.pemKeyStore(), keyStoreChannels)); if (keyDbPassword.isPresent()) log.warning("Encrypted PEM key stores are not supported."); break; case JKS: factory.setKeyStorePath(sslConfig.keyStorePath()); factory.setKeyStoreType(sslConfig.keyStoreType().toString()); factory.setKeyStorePassword(keyDbPassword.orElseThrow(passwordRequiredForJKSKeyStore("key"))); break; } if (!sslConfig.trustStorePath().isEmpty()) { factory.setTrustStorePath(sslConfig.trustStorePath()); factory.setTrustStoreType(sslConfig.trustStoreType().toString()); if (sslConfig.useTrustStorePassword()) factory.setTrustStorePassword(keyDbPassword.orElseThrow(passwordRequiredForJKSKeyStore("trust"))); } factory.setKeyManagerFactoryAlgorithm(sslConfig.sslKeyManagerFactoryAlgorithm()); factory.setProtocol(sslConfig.protocol()); return new SslConnectionFactory(factory, HttpVersion.HTTP_1_1.asString()); } /** Returns the secret password with the given name, or empty if the password name is null or empty */ @SuppressWarnings("ThrowableInstanceNeverThrown") private Supplier<RuntimeException> passwordRequiredForJKSKeyStore(String type) { return () -> new RuntimeException(String.format("Password is required for JKS %s store", type)); } private KeyStore getKeyStore(PemKeyStore pemKeyStore, Map<Path, FileChannel> keyStoreChannels) { Preconditions.checkArgument(!pemKeyStore.certificatePath().isEmpty(), "Missing certificate path."); Preconditions.checkArgument(!pemKeyStore.keyPath().isEmpty(), "Missing key path."); class KeyStoreReaderForPath implements AutoCloseable { private final 
Optional<FileChannel> channel; public final ReaderForPath readerForPath; KeyStoreReaderForPath(String pathString) { Path path = Paths.get(pathString); channel = Optional.ofNullable(keyStoreChannels.get(path)); readerForPath = new ReaderForPath(channel.map(this::getReader).orElseGet(() -> getReader(path)), path); } private Reader getReader(FileChannel channel) { try { channel.position(0); return Channels.newReader(channel, StandardCharsets.UTF_8.newDecoder(), -1); } catch (IOException e) { throw throwUnchecked(e); } } private Reader getReader(Path path) { try { return Files.newBufferedReader(path); } catch (IOException e) { throw new RuntimeException("Failed opening " + path, e); } } @Override public void close() { if (!channel.isPresent()) { closeQuietly(readerForPath.reader); } } } try (KeyStoreReaderForPath certificateReader = new KeyStoreReaderForPath(pemKeyStore.certificatePath()); KeyStoreReaderForPath keyReader = new KeyStoreReaderForPath(pemKeyStore.keyPath())) { SslKeyStore keyStore = sslKeyStoreFactory.createKeyStore(certificateReader.readerForPath, keyReader.readerForPath); return keyStore.loadJavaKeyStore(); } catch (Exception e) { throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e); } } public static class JDiscServerConnector extends ServerConnector { public static final String REQUEST_ATTRIBUTE = JDiscServerConnector.class.getName(); private final static Logger log = Logger.getLogger(JDiscServerConnector.class.getName()); private final Metric.Context metricCtx; private final ServerConnectionStatistics statistics; private final boolean tcpKeepAlive; private final boolean tcpNoDelay; private final ServerSocketChannel channelOpenedByActivator; private JDiscServerConnector(ConnectorConfig config, Metric metric, Server server, ServerSocketChannel channelOpenedByActivator, ConnectionFactory... 
factories) { super(server, factories); this.channelOpenedByActivator = channelOpenedByActivator; this.tcpKeepAlive = config.tcpKeepAliveEnabled(); this.tcpNoDelay = config.tcpNoDelay(); this.metricCtx = createMetricContext(config, metric); this.statistics = new ServerConnectionStatistics(); addBean(statistics); } private Metric.Context createMetricContext(ConnectorConfig config, Metric metric) { Map<String, Object> props = new TreeMap<>(); props.put(JettyHttpServer.Metrics.NAME_DIMENSION, config.name()); props.put(JettyHttpServer.Metrics.PORT_DIMENSION, config.listenPort()); return metric.createContext(props); } @Override protected void configure(final Socket socket) { super.configure(socket); try { socket.setKeepAlive(tcpKeepAlive); socket.setTcpNoDelay(tcpNoDelay); } catch (SocketException ignored) { } } @Override public void open() throws IOException { if (channelOpenedByActivator == null) { log.log(Level.INFO, "No channel set by activator, opening channel ourselves."); try { super.open(); } catch (RuntimeException e) { log.log(Level.SEVERE, "failed org.eclipse.jetty.server.Server open() with port "+getPort()); throw e; } return; } log.log(Level.INFO, "Using channel set by activator: " + channelOpenedByActivator); channelOpenedByActivator.socket().setReuseAddress(getReuseAddress()); int localPort = channelOpenedByActivator.socket().getLocalPort(); try { uglySetLocalPort(localPort); } catch (NoSuchFieldException | IllegalAccessException e) { throw new RuntimeException("Could not set local port.", e); } if (localPort <= 0) { throw new IOException("Server channel not bound"); } addBean(channelOpenedByActivator); channelOpenedByActivator.configureBlocking(true); addBean(channelOpenedByActivator); try { uglySetChannel(channelOpenedByActivator); } catch (NoSuchFieldException | IllegalAccessException e) { throw new RuntimeException("Could not set server channel.", e); } } private void uglySetLocalPort(int localPort) throws NoSuchFieldException, IllegalAccessException { 
Field localPortField = ServerConnector.class.getDeclaredField("_localPort"); localPortField.setAccessible(true); localPortField.set(this, localPort); } private void uglySetChannel(ServerSocketChannel channelOpenedByActivator) throws NoSuchFieldException, IllegalAccessException { Field acceptChannelField = ServerConnector.class.getDeclaredField("_acceptChannel"); acceptChannelField.setAccessible(true); acceptChannelField.set(this, channelOpenedByActivator); } public ServerConnectionStatistics getStatistics() { return statistics; } public Metric.Context getMetricContext() { return metricCtx; } public static JDiscServerConnector fromRequest(ServletRequest request) { return (JDiscServerConnector)request.getAttribute(REQUEST_ATTRIBUTE); } } }
class ConnectorFactory { private final static Logger log = Logger.getLogger(ConnectorFactory.class.getName()); private final ConnectorConfig connectorConfig; private final SslKeyStoreFactory sslKeyStoreFactory; private final SecretStore secretStore; @Inject public ConnectorFactory(ConnectorConfig connectorConfig, SslKeyStoreFactory sslKeyStoreFactory, SecretStore secretStore) { this.connectorConfig = connectorConfig; this.sslKeyStoreFactory = sslKeyStoreFactory; this.secretStore = secretStore; if (connectorConfig.ssl().enabled()) validateSslConfig(connectorConfig); } private static void validateSslConfig(ConnectorConfig config) { ConnectorConfig.Ssl ssl = config.ssl(); if (ssl.keyStoreType() == JKS) { if (! ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty()) throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS."); } if (ssl.keyStoreType() == PEM) { if (! ssl.keyStorePath().isEmpty()) throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM"); } } public ConnectorConfig getConnectorConfig() { return connectorConfig; } public ServerConnector createConnector(final Metric metric, final Server server, final ServerSocketChannel ch, Map<Path, FileChannel> keyStoreChannels) { ServerConnector connector; if (connectorConfig.ssl().enabled()) { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newSslConnectionFactory(keyStoreChannels), newHttpConnectionFactory()); } else { connector = new JDiscServerConnector(connectorConfig, metric, server, ch, newHttpConnectionFactory()); } connector.setPort(connectorConfig.listenPort()); connector.setName(connectorConfig.name()); connector.setAcceptQueueSize(connectorConfig.acceptQueueSize()); connector.setReuseAddress(connectorConfig.reuseAddress()); double soLingerTimeSeconds = connectorConfig.soLingerTime(); if (soLingerTimeSeconds == -1) { connector.setSoLingerTime(-1); } else { 
connector.setSoLingerTime((int)(soLingerTimeSeconds * 1000.0)); } connector.setIdleTimeout((long)(connectorConfig.idleTimeout() * 1000.0)); connector.setStopTimeout((long)(connectorConfig.stopTimeout() * 1000.0)); return connector; } private HttpConnectionFactory newHttpConnectionFactory() { HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSendDateHeader(true); httpConfig.setSendServerVersion(false); httpConfig.setSendXPoweredBy(false); httpConfig.setHeaderCacheSize(connectorConfig.headerCacheSize()); httpConfig.setOutputBufferSize(connectorConfig.outputBufferSize()); httpConfig.setRequestHeaderSize(connectorConfig.requestHeaderSize()); httpConfig.setResponseHeaderSize(connectorConfig.responseHeaderSize()); if (connectorConfig.ssl().enabled()) { httpConfig.addCustomizer(new SecureRequestCustomizer()); } return new HttpConnectionFactory(httpConfig); } private SslConnectionFactory newSslConnectionFactory(Map<Path, FileChannel> keyStoreChannels) { Ssl sslConfig = connectorConfig.ssl(); SslContextFactory factory = new SslContextFactory(); switch (sslConfig.clientAuth()) { case NEED_AUTH: factory.setNeedClientAuth(true); break; case WANT_AUTH: factory.setWantClientAuth(true); break; } if (!sslConfig.prng().isEmpty()) { factory.setSecureRandomAlgorithm(sslConfig.prng()); } if (!sslConfig.excludeProtocol().isEmpty()) { String[] prots = new String[sslConfig.excludeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.excludeProtocol(i).name(); } factory.setExcludeProtocols(prots); } if (!sslConfig.includeProtocol().isEmpty()) { String[] prots = new String[sslConfig.includeProtocol().size()]; for (int i = 0; i < prots.length; i++) { prots[i] = sslConfig.includeProtocol(i).name(); } factory.setIncludeProtocols(prots); } if (!sslConfig.excludeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.excludeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.excludeCipherSuite(i).name(); } 
factory.setExcludeCipherSuites(ciphs); } if (!sslConfig.includeCipherSuite().isEmpty()) { String[] ciphs = new String[sslConfig.includeCipherSuite().size()]; for (int i = 0; i < ciphs.length; i++) { ciphs[i] = sslConfig.includeCipherSuite(i).name(); } factory.setIncludeCipherSuites(ciphs); } Optional<String> keyDbPassword = secret(sslConfig.keyDbKey()); switch (sslConfig.keyStoreType()) { case PEM: factory.setKeyStore(getKeyStore(sslConfig.pemKeyStore(), keyStoreChannels)); if (keyDbPassword.isPresent()) log.warning("Encrypted PEM key stores are not supported."); break; case JKS: factory.setKeyStorePath(sslConfig.keyStorePath()); factory.setKeyStoreType(sslConfig.keyStoreType().toString()); factory.setKeyStorePassword(keyDbPassword.orElseThrow(passwordRequiredForJKSKeyStore("key"))); break; } if (!sslConfig.trustStorePath().isEmpty()) { factory.setTrustStorePath(sslConfig.trustStorePath()); factory.setTrustStoreType(sslConfig.trustStoreType().toString()); if (sslConfig.useTrustStorePassword()) factory.setTrustStorePassword(keyDbPassword.orElseThrow(passwordRequiredForJKSKeyStore("trust"))); } factory.setKeyManagerFactoryAlgorithm(sslConfig.sslKeyManagerFactoryAlgorithm()); factory.setProtocol(sslConfig.protocol()); return new SslConnectionFactory(factory, HttpVersion.HTTP_1_1.asString()); } /** Returns the secret password with the given name, or empty if the password name is null or empty */ @SuppressWarnings("ThrowableInstanceNeverThrown") private Supplier<RuntimeException> passwordRequiredForJKSKeyStore(String type) { return () -> new RuntimeException(String.format("Password is required for JKS %s store", type)); } private KeyStore getKeyStore(PemKeyStore pemKeyStore, Map<Path, FileChannel> keyStoreChannels) { Preconditions.checkArgument(!pemKeyStore.certificatePath().isEmpty(), "Missing certificate path."); Preconditions.checkArgument(!pemKeyStore.keyPath().isEmpty(), "Missing key path."); class KeyStoreReaderForPath implements AutoCloseable { private final 
Optional<FileChannel> channel; public final ReaderForPath readerForPath; KeyStoreReaderForPath(String pathString) { Path path = Paths.get(pathString); channel = Optional.ofNullable(keyStoreChannels.get(path)); readerForPath = new ReaderForPath(channel.map(this::getReader).orElseGet(() -> getReader(path)), path); } private Reader getReader(FileChannel channel) { try { channel.position(0); return Channels.newReader(channel, StandardCharsets.UTF_8.newDecoder(), -1); } catch (IOException e) { throw throwUnchecked(e); } } private Reader getReader(Path path) { try { return Files.newBufferedReader(path); } catch (IOException e) { throw new RuntimeException("Failed opening " + path, e); } } @Override public void close() { if (!channel.isPresent()) { closeQuietly(readerForPath.reader); } } } try (KeyStoreReaderForPath certificateReader = new KeyStoreReaderForPath(pemKeyStore.certificatePath()); KeyStoreReaderForPath keyReader = new KeyStoreReaderForPath(pemKeyStore.keyPath())) { SslKeyStore keyStore = sslKeyStoreFactory.createKeyStore(certificateReader.readerForPath, keyReader.readerForPath); return keyStore.loadJavaKeyStore(); } catch (Exception e) { throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e); } } public static class JDiscServerConnector extends ServerConnector { public static final String REQUEST_ATTRIBUTE = JDiscServerConnector.class.getName(); private final static Logger log = Logger.getLogger(JDiscServerConnector.class.getName()); private final Metric.Context metricCtx; private final ServerConnectionStatistics statistics; private final boolean tcpKeepAlive; private final boolean tcpNoDelay; private final ServerSocketChannel channelOpenedByActivator; private JDiscServerConnector(ConnectorConfig config, Metric metric, Server server, ServerSocketChannel channelOpenedByActivator, ConnectionFactory... 
factories) { super(server, factories); this.channelOpenedByActivator = channelOpenedByActivator; this.tcpKeepAlive = config.tcpKeepAliveEnabled(); this.tcpNoDelay = config.tcpNoDelay(); this.metricCtx = createMetricContext(config, metric); this.statistics = new ServerConnectionStatistics(); addBean(statistics); } private Metric.Context createMetricContext(ConnectorConfig config, Metric metric) { Map<String, Object> props = new TreeMap<>(); props.put(JettyHttpServer.Metrics.NAME_DIMENSION, config.name()); props.put(JettyHttpServer.Metrics.PORT_DIMENSION, config.listenPort()); return metric.createContext(props); } @Override protected void configure(final Socket socket) { super.configure(socket); try { socket.setKeepAlive(tcpKeepAlive); socket.setTcpNoDelay(tcpNoDelay); } catch (SocketException ignored) { } } @Override public void open() throws IOException { if (channelOpenedByActivator == null) { log.log(Level.INFO, "No channel set by activator, opening channel ourselves."); try { super.open(); } catch (RuntimeException e) { log.log(Level.SEVERE, "failed org.eclipse.jetty.server.Server open() with port "+getPort()); throw e; } return; } log.log(Level.INFO, "Using channel set by activator: " + channelOpenedByActivator); channelOpenedByActivator.socket().setReuseAddress(getReuseAddress()); int localPort = channelOpenedByActivator.socket().getLocalPort(); try { uglySetLocalPort(localPort); } catch (NoSuchFieldException | IllegalAccessException e) { throw new RuntimeException("Could not set local port.", e); } if (localPort <= 0) { throw new IOException("Server channel not bound"); } addBean(channelOpenedByActivator); channelOpenedByActivator.configureBlocking(true); addBean(channelOpenedByActivator); try { uglySetChannel(channelOpenedByActivator); } catch (NoSuchFieldException | IllegalAccessException e) { throw new RuntimeException("Could not set server channel.", e); } } private void uglySetLocalPort(int localPort) throws NoSuchFieldException, IllegalAccessException { 
Field localPortField = ServerConnector.class.getDeclaredField("_localPort"); localPortField.setAccessible(true); localPortField.set(this, localPort); } private void uglySetChannel(ServerSocketChannel channelOpenedByActivator) throws NoSuchFieldException, IllegalAccessException { Field acceptChannelField = ServerConnector.class.getDeclaredField("_acceptChannel"); acceptChannelField.setAccessible(true); acceptChannelField.set(this, channelOpenedByActivator); } public ServerConnectionStatistics getStatistics() { return statistics; } public Metric.Context getMetricContext() { return metricCtx; } public static JDiscServerConnector fromRequest(ServletRequest request) { return (JDiscServerConnector)request.getAttribute(REQUEST_ATTRIBUTE); } } }
This is a lot of logic to add in ApplicationApiHandler. I suggest moving it into the model - how about just let jobstatus() always return the jobs sorted?
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } List<DeploymentJobs.JobType> jobs = jobsFrom(application.deploymentSpec()); List<JobStatus> sortedStatus = application.deploymentJobs().jobStatus().entrySet().stream() .sorted(Comparator.comparingInt(kv -> jobs.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : sortedStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = 
response.setArray("globalRotations"); Set<URI> rotations = controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); List<Zone> declaredProductionZones = application.deploymentSpec().zones().stream() .filter(declaredZone -> declaredZone.environment() == Environment.prod && declaredZone.region().isPresent()) .map(declaredZone -> new Zone(declaredZone.environment(), declaredZone.region().get())) .collect(Collectors.toList()); List<Deployment> sortedDeployments = application.deployments().entrySet().stream() .sorted(Comparator.comparingInt(kv -> declaredProductionZones.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : sortedDeployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! 
rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
Map<String, RotationStatus> rotationHealthStatus =
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } Map<DeploymentJobs.JobType, JobStatus> jobStatus = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : jobStatus.values()) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = response.setArray("globalRotations"); Set<URI> rotations = 
controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); Map<Zone, Deployment> deployments = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec().zones(), application.deployments()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : deployments.values()) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
/**
 * REST handler for the /application/v4 API: tenant and application CRUD, deployments,
 * rotations and Chef promotion. Dispatches on HTTP method, then on path pattern.
 */
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;

    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
    }

    // Deployments can be slow, so this handler allows far more than the default request timeout.
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    /**
     * Dispatches by HTTP method and maps the exceptions thrown by the per-route
     * methods onto the corresponding HTTP error responses.
     */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Unexpected: log with stack trace, but return only the message to the client.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests; first matching path pattern wins. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athensDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests (user creation, tenant update, rotation override = set IN service). */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests (create/migrate tenants, create applications, deploy, restart, promote). */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        // The bare instance path and the explicit /deploy suffix are equivalent deploy entry points.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests (delete tenant/application, cancel change, deactivate, rotation override = set OUT of service). */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported HTTP methods. */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    /** Lists the top-level resources of this API. */
    private HttpResponse root(HttpRequest request) {
        return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    /**
     * Returns the authenticated user and the tenants it belongs to.
     * The user may be overridden via the 'userOverride' request property.
     */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        // True when one of the tenants is this user's personal tenant.
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants known to the controller. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athensDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athensDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! 
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); return new SlimeJsonResponse(toSlime(tenant.get(), request, true)); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" + 
tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString()); Environment env = Environment.from(environment); RegionName regionName = RegionName.from(region); URI elkUrl = controller.getElkUri(env, regionName, deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region)); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName); Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region)); try { ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId); Cursor costObject = response.setObject("cost"); CostJsonModelAdapter.toSlime(appCost, costObject); } catch (NotFoundCheckedException nfce) { log.log(Level.FINE, "Application cost data not found. 
" + nfce.getMessage()); } try { MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? 
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." 
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, 
instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); 
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { 
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        // Restart one node when a hostname is given, otherwise restart the whole deployment
        if (request.getProperty("hostname") != null)
            controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname")));
        else
            controller.applications().restart(deploymentId);
        return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                                 ApplicationResource.API_PATH, applicationName,
                                                                 EnvironmentResource.API_PATH, environment,
                                                                 "region", region,
                                                                 "instance", instanceName));
    }

    /**
     * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that
     * the application is working. It is called for all production zones, also those in which the application is not present,
     * and possibly before it is present, so failures are normal and expected.
     */
    private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) {
        try {
            DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                         new Zone(Environment.from(environment), RegionName.from(region)));
            return new JacksonJsonResponse(controller.grabLog(deploymentId));
        }
        catch (RuntimeException e) {
            // Failures are expected here (see javadoc above): answer with an empty JSON object instead of an error
            Slime slime = new Slime();
            slime.setObject();
            return new SlimeJsonResponse(slime);
        }
    }

    /**
     * Deploys an application package to the given zone.
     * The request is a multipart form holding 'deployOptions' (JSON) and 'applicationZip' (the package).
     */
    private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));

        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        if ( ! dataParts.containsKey("applicationZip"))
            return ErrorResponse.badRequest("Missing required form part 'applicationZip'");

        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        // Athens principals are authorized directly; other principals are authorized through their user id
        // (and, when present, the screwdriver build job given in the deploy options)
        DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry());
        Tenant tenant = controller.tenants().tenant(new TenantId(tenantName))
                                  .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
        Principal principal = authorizer.getPrincipal(request);
        if (principal instanceof AthensPrincipal) {
            deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId);
        } else {
            UserId userId = new UserId(principal.getName());
            deployAuthorizer.throwIfUnauthorizedForDeploy(
                    Environment.from(environment),
                    userId,
                    tenant,
                    applicationId,
                    optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new));
        }

        DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")),
                                                                 optional("vespaVersion", deployOptions).map(Version::new),
                                                                 deployOptions.field("ignoreValidationErrors").asBool(),
                                                                 deployOptions.field("deployCurrentVersion").asBool());
        ActivateResult result = controller.applications().deployApplication(applicationId,
                                                                            zone,
                                                                            new ApplicationPackage(dataParts.get("applicationZip")),
                                                                            deployOptionsJsonClass);
        return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length));
    }

    /** Deletes the given tenant, if the request principal is authorized for it, and returns its last state */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( ! tenant.isPresent())
            return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
        controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request));
        return new SlimeJsonResponse(toSlime(tenant.get(), request, false));
    }

    /** Deletes the 'default' instance of the given application */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
        com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default");
        Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request));
        if (deleted == null)
            return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found");
        return new EmptyJsonResponse();
    }

    /** Deactivates the given deployment of the given application instance */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            // Attempt deactivation even when this controller has no record of the deployment
            controller.applications().deactivate(application, zone);
        } else {
            controller.applications().deactivate(application, deployment, false);
        }
        return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                        ApplicationResource.API_PATH, applicationName,
                                                        EnvironmentResource.API_PATH, environment,
                                                        "region", region,
                                                        "instance", instanceName));
    }

    /**
     * Promote application Chef environments. To be used by component jobs only
     */
    private HttpResponse promoteApplication(String tenantName, String applicationName) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.systemChefEnvironment();
            String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /**
     * Promote application Chef environments for jobs that deploy applications
     */
    private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /** Returns the name of the authenticated principal, if any */
    private Optional<String> userFrom(HttpRequest request) {
        return authorizer.getPrincipalIfAny(request).map(Principal::getName);
    }

    /** Renders a tenant, optionally including its 'default' instance applications */
    private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) {
        object.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> object.setString("property", p.id()));
        tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString()));
        tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id()));
        Cursor applicationArray = object.setArray("applications");
        if (listApplications) {
            for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) {
                if (application.id().instance().isDefault()) // only list 'default' instances
                    toSlime(application, applicationArray.addObject(), request);
            }
        }
    }

    /** Renders a tenant entry for the tenant list, with its metadata and a self URL */
    private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
        object.setString("tenant", tenant.getId().id());
        Cursor metaData = object.setObject("metaData");
        metaData.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id()));
        tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id()));
        object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString());
    }

    /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
    private URI withPath(String newPath, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
        } catch (URISyntaxException e) {
            throw new RuntimeException("Will not happen", e);
        }
    }

    /** Adds a 'bcpStatus' object for production deployments; no-op for other environments */
    private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) {
        if ( ! deployment.zone().environment().equals(Environment.prod)) return;

        Cursor bcpStatusObject = object.setObject("bcpStatus");
        bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
    }

    /** Returns the status of the first endpoint whose name contains this deployment's environment and region (in DNS form) */
    private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
        for (String endpoint : healthStatus.keySet()) {
            if (endpoint.contains(toDns(deployment.zone().environment().value())) &&
                endpoint.contains(toDns(deployment.zone().region().value()))) {
                return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
            }
        }
        return RotationStatus.UNKNOWN;
    }

    /** Translates an identifier to DNS form: underscores become dashes */
    private String toDns(String id) {
        return id.replace('_', '-');
    }

    /** Parses the given string as a long, returning the default when it is null and throwing on malformed input */
    private long asLong(String valueOrNull, long defaultWhenNull) {
        if (valueOrNull == null) return defaultWhenNull;
        try {
            return Long.parseLong(valueOrNull);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
        }
    }

    private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
        object.setString("version", jobRun.version().toFullString());
        jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
        object.setLong("at", jobRun.at().toEpochMilli());
    }

    /** Reads the given stream (at most ~1 MB) as JSON into a Slime structure */
    private Slime toSlime(InputStream jsonStream) {
        try {
            byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
            return SlimeUtils.jsonToSlime(jsonBytes);
        } catch (IOException e) {
            // Preserve the cause; previously a bare RuntimeException was thrown, hiding the actual failure
            throw new RuntimeException(e);
        }
    }

    private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
        UserId userId = authorizer.getUserId(request);
        if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) {
            throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'",
                                                       userId.id(), userGroup.id()));
        }
    }
/** Rejects the request unless the requesting user is an admin of the given Athens domain. */
    private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) {
        UserId user = authorizer.getUserId(request);
        if (authorizer.isAthensDomainAdmin(user, tenantDomain)) return;

        throw new ForbiddenException(
                String.format("The user '%s' is not admin in Athens domain '%s'", user.id(), tenantDomain.id()));
    }

    /** Returns the named field of the given object, or throws if it is not present. */
    private Inspector mandatory(String key, Inspector object) {
        if (object.field(key).valid())
            return object.field(key);
        throw new IllegalArgumentException("'" + key + "' is missing");
    }

    /** Returns the named field of the given object as a string, if present. */
    private Optional<String> optional(String key, Inspector object) {
        Inspector field = object.field(key);
        return SlimeUtils.optionalString(field);
    }

    /** Joins the given elements into a '/'-separated path. */
    private static String path(Object... elements) {
        Joiner slashJoiner = Joiner.on("/");
        return slashJoiner.join(elements);
    }

    /** Renders the given tenant as a new Slime tree. */
    private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime root = new Slime();
        Cursor tenantObject = root.setObject();
        toSlime(tenant, tenantObject, request, listApplications);
        return root;
    }

    /** Renders an application's name, instance and self URL into the given cursor. */
    private void toSlime(Application application, Cursor object, HttpRequest request) {
        String tenantValue = application.id().tenant().value();
        String applicationValue = application.id().application().value();
        object.setString("application", applicationValue);
        object.setString("instance", application.id().instance().value());
        String applicationPath = "/application/v4/tenant/" + tenantValue + "/application/" + applicationValue;
        object.setString("url", withPath(applicationPath, request.getUri()).toString());
    }

    /** Renders an activation result: revision id, package size, prepare log, and config change actions. */
    private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) {
        Slime response = new Slime();
        Cursor root = response.setObject();
        root.setString("revisionId", result.getRevisionId().id());
        root.setLong("applicationZipSize", applicationZipSizeBytes);

        Cursor messageArray = root.setArray("prepareMessages");
        if (result.getPrepareResponse().log != null) {
            for (Log entry : result.getPrepareResponse().log) {
                Cursor messageObject = messageArray.addObject();
                messageObject.setLong("time", entry.time);
                messageObject.setString("level", entry.level);
                messageObject.setString("message", entry.message);
            }
        }

        Cursor actions = root.setObject("configChangeActions");

        Cursor restartArray = actions.setArray("restart");
        for (RestartAction action : result.getPrepareResponse().configChangeActions.restartActions) {
            Cursor actionObject = restartArray.addObject();
            actionObject.setString("clusterName", action.clusterName);
            actionObject.setString("clusterType", action.clusterType);
            actionObject.setString("serviceType", action.serviceType);
            serviceInfosToSlime(action.services, actionObject.setArray("services"));
            stringsToSlime(action.messages, actionObject.setArray("messages"));
        }

        Cursor refeedArray = actions.setArray("refeed");
        for (RefeedAction action : result.getPrepareResponse().configChangeActions.refeedActions) {
            Cursor actionObject = refeedArray.addObject();
            actionObject.setString("name", action.name);
            actionObject.setBool("allowed", action.allowed);
            actionObject.setString("documentType", action.documentType);
            actionObject.setString("clusterName", action.clusterName);
            serviceInfosToSlime(action.services, actionObject.setArray("services"));
            stringsToSlime(action.messages, actionObject.setArray("messages"));
        }
        return response;
    }

    /** Renders each service info as an object in the given array. */
    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
        for (ServiceInfo info : serviceInfoList) {
            Cursor infoObject = array.addObject();
            infoObject.setString("serviceName", info.serviceName);
            infoObject.setString("serviceType", info.serviceType);
            infoObject.setString("configId", info.configId);
            infoObject.setString("hostName", info.hostName);
        }
    }

    /** Appends each string to the given array. */
    private void stringsToSlime(List<String> strings, Cursor array) {
        strings.forEach(array::addString);
    }

    /** Parses a screwdriver build job from the given object, or empty if the object is invalid. */
    private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) {
        if ( ! object.valid() ) return Optional.empty();

        ScrewdriverId screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new).orElse(null);
        GitRevision gitRevision = gitRevisionFromSlime(object.field("gitRevision"));
        return Optional.of(new ScrewdriverBuildJob(screwdriverId, gitRevision));
    }

    /** Parses a git revision from the given object; absent fields become null. */
    private GitRevision gitRevisionFromSlime(Inspector object) {
        GitRepository repository = optional("repository", object).map(GitRepository::new).orElse(null);
        GitBranch branch = optional("branch", object).map(GitBranch::new).orElse(null);
        GitCommit commit = optional("commit", object).map(GitCommit::new).orElse(null);
        return new GitRevision(repository, branch, commit);
    }

    /** Reads the entire stream as a single string, or returns null if the stream is empty. */
    private String readToString(InputStream stream) {
        Scanner wholeStream = new Scanner(stream).useDelimiter("\\A");
        return wholeStream.hasNext() ? wholeStream.next() : null;
    }

    /** Returns whether the given version exists in this system's version status. */
    private boolean systemHasVersion(Version version) {
        return controller.versionStatus().versions().stream()
                         .anyMatch(candidate -> candidate.versionNumber().equals(version));
    }

    /** Returns the version given in the request body, or the current system version if the body is empty. */
    private Version decideDeployVersion(HttpRequest request) {
        String requestVersion = readToString(request.getData());
        return requestVersion == null ? controller.systemVersion() : new Version(requestVersion);
    }

    /** Returns jobs for given deployment spec, in the order they are declared */
    private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec deploymentSpec) {
        return Collections.unmodifiableList(deploymentSpec.steps().stream()
                                                          .flatMap(step -> jobsFrom(step).stream())
                                                          .collect(Collectors.toList()));
    }

    /** Returns jobs for the given step */
    private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec.Step step) {
        return Collections.unmodifiableList(step.zones().stream()
                                                .map(zone -> DeploymentJobs.JobType.from(controller.system(),
                                                                                         zone.environment(),
                                                                                         zone.region().orElse(null)))
                                                .collect(Collectors.toList()));
    }

}
/**
 * Request handler for the /application/v4 REST API: dispatches requests on
 * tenant, application, deployment and global-rotation resources by HTTP method and path.
 */
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;

    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
    }

    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20); // deploys can take a long time;
    }

    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Boundary catch-all: log and translate unexpected failures to a 500 response
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests on their path. Longer (more specific) paths are matched after their prefixes. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athensDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests on their path */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests on their path */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        // '.../instance/{instance}' and '.../instance/{instance}/deploy' both map to the same deploy action
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests on their path */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Answers OPTIONS with the set of supported methods in the Allow header */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    /** Lists the resources available under the API root */
    private HttpResponse root(HttpRequest request) {
        return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    /** Returns the authenticated user (or the 'userOverride' request property) and the tenants it belongs to */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            if ( ! application.deploymentJobs().projectId().isPresent()) continue; // only applications with a screwdriver project
            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        response.setArray("brokenTenantPipelines"); // always empty here
        return new SlimeJsonResponse(slime);
    }

    /** Lists Athens domains, optionally filtered by the 'prefix' request property */
    private HttpResponse athensDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athensDomain.id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known properties with their ids */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Tells whether the client should refresh its auth cookie, based on the bouncer passthru header */
    private HttpResponse cookieFreshness(HttpRequest request) {
        Slime slime = new Slime();
        String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD);
        slime.setObject().setBool("shouldRefreshCookie",
                                  ! SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader));
        return new SlimeJsonResponse(slime);
    }

    /** Returns the given tenant, including its applications */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( ! tenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        return new SlimeJsonResponse(toSlime(tenant.get(), request, true));
    }

    /** Lists all applications of the given tenant */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns detailed information about one deployment: service urls, node link, log/monitoring urls,
     * version/revision, expiry, cost and metrics (the last two best-effort).
     */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().get(id)
                .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(application.id(),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        Deployment deployment = application.deployments().get(deploymentId.zone());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zone());
        Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        if (deploymentEndpoints.isPresent()) {
            for (URI uri : deploymentEndpoints.get().getContainerEndpoints())
                serviceUrlArray.addString(uri.toString());
        }
        response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" + tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString());
        Environment env = Environment.from(environment);
        RegionName regionName = RegionName.from(region);
        URI elkUrl = controller.getElkUri(env, regionName, deploymentId);
        if (elkUrl != null)
            response.setString("elkUrl", elkUrl.toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.revision().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Report when the deployment expires, for zones with a deployment time-to-live
        Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region));
        deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli()));
        application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.revision().source(), response);
        com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName);
        Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region));
        // Cost data is best-effort: absence is logged at FINE and otherwise ignored
        try {
            ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId);
            Cursor costObject = response.setObject("cost");
            CostJsonModelAdapter.toSlime(appCost, costObject);
        } catch (NotFoundCheckedException nfce) {
            log.log(Level.FINE, "Application cost data not found. " + nfce.getMessage());
        }
        // Metrics are best-effort as well: failures are logged and the section is omitted
        try {
            MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId);
            Cursor metricsObject = response.setObject("metrics");
            metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
            metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
            metricsObject.setDouble("documentCount", metrics.documentCount());
            metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
            metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e));
        }
        return new SlimeJsonResponse(slime);
    }

    /** Renders a revision hash and, when present, its source revision */
    private void toSlime(ApplicationRevision revision, Cursor object) {
        object.setString("hash", revision.id());
        if (revision.source().isPresent())
            sourceRevisionToSlime(revision.source(), object.setObject("source"));
    }

    /** Renders a source revision's repository, branch and commit; no-op when absent */
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(),
                                                                deploymentId.zone().region(),
                                                                deploymentId.applicationId());
    }

    /** Sets the global rotation of this deployment in or out of service, recording reason, agent and time */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if (!existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");

        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);

        // The request body must contain a 'reason' for the override
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = authorizer.getUserId(request).toString();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp);
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        try {
            List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus);
            return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? "in" : "out of"));
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage());
        }
    }

    /** Returns the current global rotation override status of this deployment */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        Slime slime = new Slime();
        Cursor c1 = slime.setObject().setArray("globalrotationoverride");
        try {
            Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId);
            for (String rotation : rotations.keySet()) {
                EndpointStatus currentStatus = rotations.get(rotation);
                c1.addString(rotation);
                Cursor c2 = c1.addObject();
                c2.setString("status", currentStatus.getStatus().name());
                c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
                c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
                c2.setLong("timestamp", currentStatus.getEpoch());
            }
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the rotation health ('bcpStatus') for the deployment in the given environment and region */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Set<URI> rotations = controller.getRotationUris(applicationId);
        if (rotations.isEmpty())
            throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'");

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        // Health is looked up for the first rotation's host; endpoints are matched on DNS-encoded env and region
        Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost());
        for (String rotationEndpoint : rotationHealthStatus.keySet()) {
            if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) {
                Cursor bcpStatusObject = response.setObject("bcpStatus");
                bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /** Waits for config convergence of this deployment, bounded by the 'timeout' request property (default 1000) */
    private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                                                            new Zone(Environment.from(environment), RegionName.from(region))),
                                                                           asLong(request.getProperty("timeout"), 1000)));
    }

    /** Lists the services of this deployment */
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)),
                                                             new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)),
                                                             request.getUri());
        response.setResponse(applicationView);
        return response;
    }

    /** Proxies a request for a single service's API below the given rest path */
    private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
        Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
        ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)),
                                                             new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)),
                                                             request.getUri());
        response.setResponse(result, serviceName, restPath);
        return response;
    }

    /** Creates a user tenant for the authenticated user */
    private HttpResponse createUser(HttpRequest request) {
        Optional<String> username = userFrom(request);
        if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated.");

        try {
            controller.tenants().createUserTenant(username.get());
            return new MessageResponse("Created user '" + username.get() + "'");
        } catch (AlreadyExistsException e) {
            // NOTE(review): 'username' here is an Optional, so this message renders as
            // "User 'Optional[name]' already exists" — probably meant username.get()
            return new MessageResponse("User '" + username + "' already exists");
        }
    }

    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( !
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); 
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { 
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry()); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); if (principal instanceof AthensPrincipal) { deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); } else { UserId userId = new UserId(principal.getName()); deployAuthorizer.throwIfUnauthorizedForDeploy( Environment.from(environment), userId, tenant, applicationId, optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new)); } DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant.get(), request, false)); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new 
RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! deployment.zone().environment().equals(Environment.prod)) return; Cursor bcpStatusObject = object.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name()); } private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) { for (String endpoint : healthStatus.keySet()) { if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) { return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN); } } return RotationStatus.UNKNOWN; } private String toDns(String id) { return id.replace('_', '-'); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setString("version", jobRun.version().toFullString()); jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision"))); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) { UserId userId = authorizer.getUserId(request); if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) { throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id())); } } 
private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthensDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athens domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(tenant, slime.setObject(), request, listApplications); return slime; } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = 
object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty(); Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new); return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision")))); } private GitRevision gitRevisionFromSlime(Inspector object) { return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null), optional("branch", object).map(GitBranch::new).orElse(null), optional("commit", object).map(GitCommit::new).orElse(null)); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } private Version decideDeployVersion(HttpRequest request) { String requestVersion = readToString(request.getData()); if (requestVersion != null) return new Version(requestVersion); else return controller.systemVersion(); } }
Agreed, will change.
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } List<DeploymentJobs.JobType> jobs = jobsFrom(application.deploymentSpec()); List<JobStatus> sortedStatus = application.deploymentJobs().jobStatus().entrySet().stream() .sorted(Comparator.comparingInt(kv -> jobs.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : sortedStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = 
response.setArray("globalRotations"); Set<URI> rotations = controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); List<Zone> declaredProductionZones = application.deploymentSpec().zones().stream() .filter(declaredZone -> declaredZone.environment() == Environment.prod && declaredZone.region().isPresent()) .map(declaredZone -> new Zone(declaredZone.environment(), declaredZone.region().get())) .collect(Collectors.toList()); List<Deployment> sortedDeployments = application.deployments().entrySet().stream() .sorted(Comparator.comparingInt(kv -> declaredProductionZones.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : sortedDeployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! 
rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
Map<String, RotationStatus> rotationHealthStatus =
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } Map<DeploymentJobs.JobType, JobStatus> jobStatus = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : jobStatus.values()) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = response.setArray("globalRotations"); Set<URI> rotations = 
controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); Map<Zone, Deployment> deployments = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec().zones(), application.deployments()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : deployments.values()) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
// Request handler for the /application/v4 REST API.
// handle() dispatches on HTTP method and is the single boundary where domain
// exceptions are mapped to HTTP error responses (403/404/400/config-server/500).
// NOTE(review): getTimeout() returns 20 minutes — presumably sized for slow
// deploy requests; confirm against the jdisc handler timeout contract.
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final Authorizer authorizer; public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) { super(executor, accessLog); this.controller = controller; this.authorizer = authorizer; } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athensDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request); if
// (cont.) tenant- and application-scoped GET routes, including the per-deployment
// views: converge, service(s), global-rotation status
(path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if
// (cont.) last GET route (rotation override), then the PUT and POST route tables
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return
// (cont.) POST deploy/restart/log/promote routes for a concrete deployment
deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return
// (cont.) DELETE deactivates a deployment / clears a rotation override; then
// OPTIONS, the API root listing, and the user/tenant listing responses
deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse root(HttpRequest request) { return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = userFrom(request) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */
// GET implementations: tenant pipelines (screwdriver project ids), Athens
// domains, properties, cookie freshness, single tenant, application list,
// and the single-deployment view.
private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athensDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athensDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", !
// (cont.) true unless the bouncer passthru header already marks the cookie OK
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); return new SlimeJsonResponse(toSlime(tenant.get(), request, true)); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" +
// (cont.) deployment view: node repo link, ELK/Yamas links, version, revision,
// deploy time / expiry, then best-effort cost and metrics (failures only logged)
tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString()); Environment env = Environment.from(environment); RegionName regionName = RegionName.from(region); URI elkUrl = controller.getElkUri(env, regionName, deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region)); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName); Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region)); try { ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId); Cursor costObject = response.setObject("cost"); CostJsonModelAdapter.toSlime(appCost, costObject); } catch (NotFoundCheckedException nfce) { log.log(Level.FINE, "Application cost data not found. 
" + nfce.getMessage()); } try { MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( !
// (cont.) sourceRevisionToSlime: silently writes nothing when no source revision;
// then the monitoring URI helper and the rotation-override write path
revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ?
// (cont.) human-readable "in"/"out of" service phrasing; then the read path
// for rotation overrides and the per-deployment rotation health status
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "."
// (cont.) e.g. "prod.us-west-1" in the not-found message; then convergence
// wait and the service-view endpoints
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName,
instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
// (cont.) createTenant tail: remaining optional fields plus authorization
// checks; then tenant migration to Athens and application creation
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request);
// createApplication response; then version-deploy trigger and cancel-deploy,
// both guarded by a per-application lock
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
// (cont.) restart (whole deployment, or one host via ?hostname=) and the
// best-effort log-grab endpoint (any failure yields an empty JSON object)
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( !
// (cont.) package deploy: validate multipart form, authorize (Athens principal
// or screwdriver user), then deploy the posted application zip
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry()); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); if (principal instanceof AthensPrincipal) { deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); } else { UserId userId = new UserId(principal.getName()); deployAuthorizer.throwIfUnauthorizedForDeploy( Environment.from(environment), userId, tenant, applicationId, optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new)); } DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( !
        tenant.isPresent())
            return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
        controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request));
        // Answer with the deleted tenant's data (without its application list).
        return new SlimeJsonResponse(toSlime(tenant.get(), request, false));
    }

    /** Deletes the default instance of the given application, answering 404 when it does not exist. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        authorizer.throwIfUnauthorized(new TenantId(tenantName), request);
        com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default");
        Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request));
        if (deleted == null)
            return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found");
        return new EmptyJsonResponse();
    }

    /** Deactivates the given deployment; attempts deactivation even when the deployment is not known here. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));

        Zone zone = new Zone(Environment.from(environment), RegionName.from(region));
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            // NOTE(review): presumably the config server may know about deployments this controller does not — confirm
            controller.applications().deactivate(application, zone);
        } else {
            controller.applications().deactivate(application, deployment, false);
        }
        return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                        ApplicationResource.API_PATH, applicationName,
                                                        EnvironmentResource.API_PATH, environment,
                                                        "region", region,
                                                        "instance", instanceName));
    }

    /**
     * Promote application Chef environments. To be used by component jobs only.
     */
    private HttpResponse promoteApplication(String tenantName, String applicationName) {
        try{
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.systemChefEnvironment();
            String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /**
     * Promote application Chef environments for jobs that deploy applications.
     */
    private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /** Returns the name of the authenticated principal, if any. */
    private Optional<String> userFrom(HttpRequest request) {
        return authorizer.getPrincipalIfAny(request).map(Principal::getName);
    }

    /** Serializes a tenant; includes its default-instance applications only when listApplications is true. */
    private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) {
        object.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> object.setString("property", p.id()));
        tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString()));
        tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id()));
        Cursor applicationArray = object.setArray("applications");
        if (listApplications) {
            for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) {
                // Only default instances are listed.
                if (application.id().instance().isDefault())
                    toSlime(application, applicationArray.addObject(), request);
            }
        }
    }

    /** Serializes one entry of the tenant list, including a URL pointing to the full tenant resource. */
    private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
        object.setString("tenant", tenant.getId().id());
        Cursor metaData = object.setObject("metaData");
        metaData.setString("type", tenant.tenantType().name());
        tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id()));
        tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id()));
        tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id()));
        object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString());
    }

    /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
    private URI withPath(String newPath, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
        }
        catch (URISyntaxException e) {
            throw new 
RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! deployment.zone().environment().equals(Environment.prod)) return; Cursor bcpStatusObject = object.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name()); } private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) { for (String endpoint : healthStatus.keySet()) { if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) { return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN); } } return RotationStatus.UNKNOWN; } private String toDns(String id) { return id.replace('_', '-'); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setString("version", jobRun.version().toFullString()); jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision"))); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) { UserId userId = authorizer.getUserId(request); if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) { throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id())); } } 
    /** Throws ForbiddenException unless the requesting user is an admin of the given Athens domain. */
    private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) {
        UserId userId = authorizer.getUserId(request);
        if ( ! authorizer.isAthensDomainAdmin(userId, tenantDomain)) {
            throw new ForbiddenException(
                    String.format("The user '%s' is not admin in Athens domain '%s'", userId.id(), tenantDomain.id()));
        }
    }

    /** Returns the given field, throwing IllegalArgumentException when it is missing. */
    private Inspector mandatory(String key, Inspector object) {
        if ( ! object.field(key).valid())
            throw new IllegalArgumentException("'" + key + "' is missing");
        return object.field(key);
    }

    /** Returns the given string field as an Optional, empty when missing. */
    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }

    /** Joins the given elements into a path with '/' separators. */
    private static String path(Object... elements) {
        return Joiner.on("/").join(elements);
    }

    /** Serializes a tenant to a new Slime structure. */
    private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(tenant, slime.setObject(), request, listApplications);
        return slime;
    }

    /** Serializes an application reference: name, instance, and a URL to the full application resource. */
    private void toSlime(Application application, Cursor object, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
                                         "/application/" + application.id().application().value(),
                                         request.getUri()).toString());
    }

    /**
     * Serializes the result of a deployment: revision id, package size, prepare log messages,
     * and the config change actions (restart and refeed) reported by the config server.
     */
    private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) {
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        object.setString("revisionId", result.getRevisionId().id());
        object.setLong("applicationZipSize", applicationZipSizeBytes);
        Cursor logArray = object.setArray("prepareMessages");
        if (result.getPrepareResponse().log != null) {
            for (Log logMessage : result.getPrepareResponse().log) {
                Cursor logObject = logArray.addObject();
                logObject.setLong("time", logMessage.time);
                logObject.setString("level", logMessage.level);
                logObject.setString("message", logMessage.message);
            }
        }
        Cursor changeObject = object.setObject("configChangeActions");

        Cursor restartActionsArray = changeObject.setArray("restart");
        for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) {
            Cursor restartActionObject = restartActionsArray.addObject();
            restartActionObject.setString("clusterName", restartAction.clusterName);
            restartActionObject.setString("clusterType", restartAction.clusterType);
            restartActionObject.setString("serviceType", restartAction.serviceType);
            serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
            stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
        }

        Cursor refeedActionsArray = changeObject.setArray("refeed");
        for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) {
            Cursor refeedActionObject = refeedActionsArray.addObject();
            refeedActionObject.setString("name", refeedAction.name);
            refeedActionObject.setBool("allowed", refeedAction.allowed);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }

    /** Serializes a list of service infos into the given array cursor. */
    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
        for (ServiceInfo serviceInfo : serviceInfoList) {
            Cursor serviceInfoObject = array.addObject();
            serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
            serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
            serviceInfoObject.setString("configId", serviceInfo.configId);
            serviceInfoObject.setString("hostName", serviceInfo.hostName);
        }
    }

    /** Serializes a list of strings into the given array cursor. */
    private void stringsToSlime(List<String> strings, Cursor array) {
        for (String string : strings)
            array.addString(string);
    }

    /** Deserializes an optional screwdriver build job (id plus git revision) from the given Slime object. */
    private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) {
        if ( ! 
object.valid() ) return Optional.empty(); Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new); return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision")))); } private GitRevision gitRevisionFromSlime(Inspector object) { return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null), optional("branch", object).map(GitBranch::new).orElse(null), optional("commit", object).map(GitCommit::new).orElse(null)); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } private Version decideDeployVersion(HttpRequest request) { String requestVersion = readToString(request.getData()); if (requestVersion != null) return new Version(requestVersion); else return controller.systemVersion(); } /** Returns jobs for given deployment spec, in the order they are declared */ private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec deploymentSpec) { return deploymentSpec.steps().stream() .flatMap(step -> jobsFrom(step).stream()) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } /** Returns jobs for the given step */ private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec.Step step) { return step.zones().stream() .map(zone -> DeploymentJobs.JobType.from(controller.system(), zone.environment(), zone.region().orElse(null))) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
/**
 * Request handler for the /application/v4 REST API: tenant, application and deployment management
 * (create/read/update/delete, deploy, restart, log retrieval, global rotation control, Chef promotion).
 */
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;

    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
    }

    /** Long timeout because deployment requests may take considerable time to complete. */
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    /** Dispatches on HTTP method and maps exceptions to the appropriate HTTP error responses. */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Unexpected: log with full stack trace before answering 500.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests to the matching resource handler; answers 404 when no route matches. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athensDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests; answers 404 when no route matches. */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests; answers 404 when no route matches. */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests; answers 404 when no route matches. */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Answers OPTIONS requests by listing the supported methods. */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    /** Lists the top-level resources of this API. */
    private HttpResponse root(HttpRequest request) {
        return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    /** Returns the authenticated (or overridden) user and the tenants it belongs to. */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));

        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        // True when one of the tenants is this user's own (user tenant).
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants known to this controller. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            // Only applications with a screwdriver project id have a pipeline.
            if ( ! application.deploymentJobs().projectId().isPresent()) continue;

            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        // Always present, always empty — kept for response-format compatibility.
        response.setArray("brokenTenantPipelines");
        return new SlimeJsonResponse(slime);
    }

    /** Lists Athens domains, optionally filtered by the 'prefix' request property. */
    private HttpResponse athensDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athensDomain.id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known properties with their ids. */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Tells whether the client should refresh its auth cookie, based on the bouncer passthru header. */
    private HttpResponse cookieFreshness(HttpRequest request) {
        Slime slime = new Slime();
        String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD);
        slime.setObject().setBool("shouldRefreshCookie",
                                  ! SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader));
        return new SlimeJsonResponse(slime);
    }

    /** Returns a single tenant, including its applications; 404 when it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName));
        if ( ! tenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        return new SlimeJsonResponse(toSlime(tenant.get(), request, true));
    }

    /** Lists the applications of a tenant. */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns details for one deployment: service URLs, node/log/monitoring links, version and
     * revision info, expiry, cost and metrics. 404 when the application is not deployed in the zone.
     */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().get(id)
                .orElseThrow(() -> new NotExistsException(id + " not found"));

        DeploymentId deploymentId = new DeploymentId(application.id(),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        Deployment deployment = application.deployments().get(deploymentId.zone());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zone());

        Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        if (deploymentEndpoints.isPresent()) {
            for (URI uri : deploymentEndpoints.get().getContainerEndpoints())
                serviceUrlArray.addString(uri.toString());
        }

        response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" + tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString());

        Environment env = Environment.from(environment);
        RegionName regionName = RegionName.from(region);
        URI elkUrl = controller.getElkUri(env, regionName, deploymentId);
        if (elkUrl != null)
            response.setString("elkUrl", elkUrl.toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.revision().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region));
        deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli()));

        application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.revision().source(), response);

        com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName);
        Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region));

        // Cost data may legitimately be missing; log and continue without it.
        try {
            ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId);
            Cursor costObject = response.setObject("cost");
            CostJsonModelAdapter.toSlime(appCost, costObject);
        } catch (NotFoundCheckedException nfce) {
            log.log(Level.FINE, "Application cost data not found. 
" + nfce.getMessage()); } try { MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
        revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    /** Returns the monitoring-system URL for the given deployment. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(),
                                                                deploymentId.zone().region(),
                                                                deploymentId.applicationId());
    }

    /**
     * Sets the global rotation in/out-of-service override for a deployment.
     * Requires tenant authorization and a request body with a mandatory 'reason' field.
     */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        // Check if request is authorized
        Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName));
        if (!existingTenant.isPresent())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        authorizer.throwIfUnauthorized(existingTenant.get().getId(), request);

        // Decode payload (reason) and construct the endpoint status to set
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = authorizer.getUserId(request).toString();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp);

        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));
        try {
            List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus);
            return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? "in" : "out of"));
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage());
        }
    }

    /** Returns the current global rotation override status for each rotation of the given deployment. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     new Zone(Environment.from(environment), RegionName.from(region)));

        Slime slime = new Slime();
        Cursor c1 = slime.setObject().setArray("globalrotationoverride");
        try {
            Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId);
            for (String rotation : rotations.keySet()) {
                EndpointStatus currentStatus = rotations.get(rotation);
                // Each rotation is emitted as a name string followed by its status object.
                c1.addString(rotation);
                Cursor c2 = c1.addObject();
                c2.setString("status", currentStatus.getStatus().name());
                c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
                c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
                c2.setLong("timestamp", currentStatus.getEpoch());
            }
        } catch (IOException e) {
            return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the health status of the global rotation endpoint matching the given environment and region. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Set<URI> rotations = controller.getRotationUris(applicationId);
        if (rotations.isEmpty())
            throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'");

        Slime slime = new Slime();
        Cursor response = slime.setObject();

        // All rotations share the same health status host; query via the first one.
        Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost());
        for (String rotationEndpoint : rotationHealthStatus.keySet()) {
            if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) {
                Cursor bcpStatusObject = response.setObject("bcpStatus");
                bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /** Waits (up to the 'timeout' property, default 1000) for config convergence of the given deployment. */
    private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                                                            new Zone(Environment.from(environment), RegionName.from(region))),
                                                                           asLong(request.getProperty("timeout"), 1000)));
    }

    /** Lists the services of a deployment, as reported by the config server. */
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)),
                                                             new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)),
                                                             request.getUri());
        response.setResponse(applicationView);
        return response;
    }

    /** Proxies a service-level API request (identified by service name and rest path) to the config server. */
    private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
        Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, 
instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); 
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { 
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry()); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); if (principal instanceof AthensPrincipal) { deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); } else { UserId userId = new UserId(principal.getName()); deployAuthorizer.throwIfUnauthorizedForDeploy( Environment.from(environment), userId, tenant, applicationId, optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new)); } DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant.get(), request, false)); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new 
RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! deployment.zone().environment().equals(Environment.prod)) return; Cursor bcpStatusObject = object.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name()); } private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) { for (String endpoint : healthStatus.keySet()) { if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) { return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN); } } return RotationStatus.UNKNOWN; } private String toDns(String id) { return id.replace('_', '-'); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setString("version", jobRun.version().toFullString()); jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision"))); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) { UserId userId = authorizer.getUserId(request); if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) { throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id())); } } 
private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthensDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athens domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(tenant, slime.setObject(), request, listApplications); return slime; } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = 
object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty(); Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new); return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision")))); } private GitRevision gitRevisionFromSlime(Inspector object) { return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null), optional("branch", object).map(GitBranch::new).orElse(null), optional("commit", object).map(GitCommit::new).orElse(null)); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } private Version decideDeployVersion(HttpRequest request) { String requestVersion = readToString(request.getData()); if (requestVersion != null) return new Version(requestVersion); else return controller.systemVersion(); } }
Right, I actually thought about doing that, but the problem is that you need access to deployment spec to sort and it's not available in `DeploymentJobs`. Passing `DeploymentSpec` down to `DeploymentJobs` and storing a reference to it there just for the sake of sorting seems wrong. Suggestions?
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } List<DeploymentJobs.JobType> jobs = jobsFrom(application.deploymentSpec()); List<JobStatus> sortedStatus = application.deploymentJobs().jobStatus().entrySet().stream() .sorted(Comparator.comparingInt(kv -> jobs.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : sortedStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = 
response.setArray("globalRotations"); Set<URI> rotations = controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); List<Zone> declaredProductionZones = application.deploymentSpec().zones().stream() .filter(declaredZone -> declaredZone.environment() == Environment.prod && declaredZone.region().isPresent()) .map(declaredZone -> new Zone(declaredZone.environment(), declaredZone.region().get())) .collect(Collectors.toList()); List<Deployment> sortedDeployments = application.deployments().entrySet().stream() .sorted(Comparator.comparingInt(kv -> declaredProductionZones.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : sortedDeployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! 
rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
Map<String, RotationStatus> rotationHealthStatus =
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } Map<DeploymentJobs.JobType, JobStatus> jobStatus = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : jobStatus.values()) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = response.setArray("globalRotations"); Set<URI> rotations = 
controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); Map<Zone, Deployment> deployments = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec().zones(), application.deployments()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : deployments.values()) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final Authorizer authorizer; public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) { super(executor, accessLog); this.controller = controller; this.authorizer = authorizer; } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athensDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request); if 
(path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return 
deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return 
deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse root(HttpRequest request) { return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = userFrom(request) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ 
private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athensDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athensDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! 
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); return new SlimeJsonResponse(toSlime(tenant.get(), request, true)); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" + 
tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString()); Environment env = Environment.from(environment); RegionName regionName = RegionName.from(region); URI elkUrl = controller.getElkUri(env, regionName, deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region)); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName); Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region)); try { ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId); Cursor costObject = response.setObject("cost"); CostJsonModelAdapter.toSlime(appCost, costObject); } catch (NotFoundCheckedException nfce) { log.log(Level.FINE, "Application cost data not found. 
" + nfce.getMessage()); } try { MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? 
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." 
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, 
instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); 
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { 
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry()); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); if (principal instanceof AthensPrincipal) { deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); } else { UserId userId = new UserId(principal.getName()); deployAuthorizer.throwIfUnauthorizedForDeploy( Environment.from(environment), userId, tenant, applicationId, optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new)); } DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant.get(), request, false)); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new 
RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! deployment.zone().environment().equals(Environment.prod)) return; Cursor bcpStatusObject = object.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name()); } private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) { for (String endpoint : healthStatus.keySet()) { if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) { return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN); } } return RotationStatus.UNKNOWN; } private String toDns(String id) { return id.replace('_', '-'); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setString("version", jobRun.version().toFullString()); jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision"))); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) { UserId userId = authorizer.getUserId(request); if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) { throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id())); } } 
/** Throws ForbiddenException unless the requester is an admin of the given Athens domain. */
private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) {
    UserId userId = authorizer.getUserId(request);
    if ( ! authorizer.isAthensDomainAdmin(userId, tenantDomain)) {
        throw new ForbiddenException(
                String.format("The user '%s' is not admin in Athens domain '%s'", userId.id(), tenantDomain.id()));
    }
}

/** Returns the named field, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the named string field, empty if missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

/** Joins path elements with '/'. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}

/** Serializes a tenant to a new Slime tree. */
private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) {
    Slime slime = new Slime();
    toSlime(tenant, slime.setObject(), request, listApplications);
    return slime;
}

/** Serializes one application entry (name, instance, url) for list responses. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + application.id().tenant().value() +
                                     "/application/" + application.id().application().value(),
                                     request.getUri()).toString());
}

/** Serializes the result of a deploy: revision, package size, prepare log and config change actions. */
private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.getRevisionId().id());
    object.setLong("applicationZipSize", applicationZipSizeBytes);

    Cursor logArray = object.setArray("prepareMessages");
    if (result.getPrepareResponse().log != null) {
        for (Log logMessage : result.getPrepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }

    Cursor changeObject = object.setObject("configChangeActions");

    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }

    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }

    return slime;
}

/** Serializes a list of service infos into the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo serviceInfo : serviceInfoList) {
        Cursor serviceInfoObject = array.addObject();
        serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
        serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
        serviceInfoObject.setString("configId", serviceInfo.configId);
        serviceInfoObject.setString("hostName", serviceInfo.hostName);
    }
}

/** Appends each string to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    for (String string : strings)
        array.addString(string);
}

/** Deserializes an optional screwdriver build job (id plus git revision) from the deploy options. */
private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) {
    if ( ! object.valid() ) return Optional.empty();
    Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
    return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null),
                                               gitRevisionFromSlime(object.field("gitRevision"))));
}

/** Deserializes a git revision; each component may be absent. */
private GitRevision gitRevisionFromSlime(Inspector object) {
    return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                           optional("branch", object).map(GitBranch::new).orElse(null),
                           optional("commit", object).map(GitCommit::new).orElse(null));
}

/** Reads the entire stream as a string, or returns null if the stream is empty. */
private String readToString(InputStream stream) {
    // Fix: close the Scanner (and the underlying stream) — the original leaked it.
    // "\\A" makes next() return the whole remaining input in one token.
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }
}

/** Returns whether the given version exists somewhere in this system. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}

/** Returns the version given in the request body, or the current system version when none is given. */
private Version decideDeployVersion(HttpRequest request) {
    String requestVersion = readToString(request.getData());
    if (requestVersion != null)
        return new Version(requestVersion);
    else
        return controller.systemVersion();
}

/** Returns jobs for given deployment spec, in the order they are declared */
private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec deploymentSpec) {
    return deploymentSpec.steps().stream()
            .flatMap(step -> jobsFrom(step).stream())
            .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}

/** Returns jobs for the given step */
private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec.Step step) {
    return step.zones().stream()
            .map(zone -> DeploymentJobs.JobType.from(controller.system(), zone.environment(), zone.region().orElse(null)))
            .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}

}
/**
 * Request handler for the /application/v4 REST API: tenant, application and deployment
 * resources, deploy/restart/log operations and global-rotation management.
 * Dispatches on HTTP method, then on URI path pattern.
 */
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;

    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
    }

    @Override
    public Duration getTimeout() {
        // Deploys can take a long time, so allow a generous request timeout
        return Duration.ofMinutes(20);
    }

    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        // Map domain exceptions to appropriate HTTP status codes
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Unexpected: log with stack trace and return 500
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests by path pattern. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athensDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests by path pattern. */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        // PUT on the override puts the rotation back in service (inService = false means "not overridden out")
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests by path pattern. */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        // Both the bare instance path and the explicit /deploy path trigger a deployment
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests by path pattern. */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // DELETE on the override takes the rotation out of service (inService = true here means "delete the out-of-service override"? NOTE(review): verify semantics against setGlobalRotationOverride)
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported HTTP methods. */
    private HttpResponse handleOPTIONS() {
        // We implement this to avoid redirect loops on OPTIONS requests from browsers, but do not really bother
        // spelling out the methods supported at each path, which we should
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    /** Lists the top-level resources of this API. */
    private HttpResponse root(HttpRequest request) {
        return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    /** Returns the authenticated (or overridden) user and the tenants it belongs to. */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        // Whether a personal tenant already exists for this user
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants in the system. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athensDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athensDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! 
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); return new SlimeJsonResponse(toSlime(tenant.get(), request, true)); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" + 
tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString()); Environment env = Environment.from(environment); RegionName regionName = RegionName.from(region); URI elkUrl = controller.getElkUri(env, regionName, deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region)); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName); Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region)); try { ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId); Cursor costObject = response.setObject("cost"); CostJsonModelAdapter.toSlime(appCost, costObject); } catch (NotFoundCheckedException nfce) { log.log(Level.FINE, "Application cost data not found. 
" + nfce.getMessage()); } try { MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? 
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." 
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, 
instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); 
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { 
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry()); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); if (principal instanceof AthensPrincipal) { deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); } else { UserId userId = new UserId(principal.getName()); deployAuthorizer.throwIfUnauthorizedForDeploy( Environment.from(environment), userId, tenant, applicationId, optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new)); } DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant.get(), request, false)); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new 
RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! deployment.zone().environment().equals(Environment.prod)) return; Cursor bcpStatusObject = object.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name()); } private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) { for (String endpoint : healthStatus.keySet()) { if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) { return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN); } } return RotationStatus.UNKNOWN; } private String toDns(String id) { return id.replace('_', '-'); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setString("version", jobRun.version().toFullString()); jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision"))); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) { UserId userId = authorizer.getUserId(request); if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) { throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id())); } } 
private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthensDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athens domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(tenant, slime.setObject(), request, listApplications); return slime; } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = 
object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty(); Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new); return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision")))); } private GitRevision gitRevisionFromSlime(Inspector object) { return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null), optional("branch", object).map(GitBranch::new).orElse(null), optional("commit", object).map(GitCommit::new).orElse(null)); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } private Version decideDeployVersion(HttpRequest request) { String requestVersion = readToString(request.getData()); if (requestVersion != null) return new Version(requestVersion); else return controller.systemVersion(); } }
DeploymentJobs is owned by an Application, and cannot exist outside of an Application. You just need to pass the owning application to DeploymentJobs when it is created. Then you can do owner.deploymentSpec() to access it.
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } List<DeploymentJobs.JobType> jobs = jobsFrom(application.deploymentSpec()); List<JobStatus> sortedStatus = application.deploymentJobs().jobStatus().entrySet().stream() .sorted(Comparator.comparingInt(kv -> jobs.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : sortedStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = 
response.setArray("globalRotations"); Set<URI> rotations = controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); List<Zone> declaredProductionZones = application.deploymentSpec().zones().stream() .filter(declaredZone -> declaredZone.environment() == Environment.prod && declaredZone.region().isPresent()) .map(declaredZone -> new Zone(declaredZone.environment(), declaredZone.region().get())) .collect(Collectors.toList()); List<Deployment> sortedDeployments = application.deployments().entrySet().stream() .sorted(Comparator.comparingInt(kv -> declaredProductionZones.indexOf(kv.getKey()))) .map(Map.Entry::getValue) .collect(Collectors.toList()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : sortedDeployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! 
rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
Map<String, RotationStatus> rotationHealthStatus =
private HttpResponse application(String tenantName, String applicationName, Path path, HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); if (application.deploying().isPresent()) { Cursor deployingObject = response.setObject("deploying"); if (application.deploying().get() instanceof Change.VersionChange) deployingObject.setString("version", ((Change.VersionChange)application.deploying().get()).version().toString()); else if (((Change.ApplicationChange)application.deploying().get()).revision().isPresent()) toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision")); } Map<DeploymentJobs.JobType, JobStatus> jobStatus = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus()); Cursor deploymentsArray = response.setArray("deploymentJobs"); for (JobStatus job : jobStatus.values()) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().id()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } response.setString("compileVersion", application.compileVersion(controller).toFullString()); Cursor globalRotationsArray = response.setArray("globalRotations"); Set<URI> rotations = 
controller.getRotationUris(applicationId); Map<String, RotationStatus> rotationHealthStatus = rotations.isEmpty() ? Collections.emptyMap() : controller.getHealthStatus(rotations.iterator().next().getHost()); for (URI rotation : rotations) globalRotationsArray.addString(rotation.toString()); Map<Zone, Deployment> deployments = controller.applications().deploymentTrigger() .deploymentOrder() .sortBy(application.deploymentSpec().zones(), application.deployments()); Cursor instancesArray = response.setArray("instances"); for (Deployment deployment : deployments.values()) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if ( ! rotations.isEmpty()) setRotationStatus(deployment, rotationHealthStatus, deploymentObject); deploymentObject.setString("url", withPath(path.asString() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } try { MetricsService.ApplicationMetrics metrics = controller.metricsService().getApplicationMetrics(applicationId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", metrics.queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", metrics.writeServiceQuality()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); }
/**
 * Implementation of the /application/v4 API: dispatches by HTTP method and path
 * to per-resource handler methods, and maps exceptions to HTTP error responses.
 */
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final Authorizer authorizer;

    public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) {
        super(executor, accessLog);
        this.controller = controller;
        this.authorizer = authorizer;
    }

    @Override
    public Duration getTimeout() {
        // Long timeout: some operations (e.g. deployment) are slow
        return Duration.ofMinutes(20);
    }

    /**
     * Entry point: dispatch by HTTP method; translate known exception types to
     * the corresponding HTTP error responses so callers get structured errors.
     */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Unexpected: log with stack trace, return 500 with the message only
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests by path pattern; falls through to 404 when nothing matches. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athensDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests: user creation, tenant update, and rotation override set-in-service. */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
            // false = set rotation IN service
            return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests: tenant/application creation, deployment triggering, restarts, promotion. */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request);
        // The next two patterns both perform a deployment; the bare instance path is the legacy form
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests: tenant/application deletion, deploy cancel, deactivate, rotation override removal. */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
            // true = set rotation OUT of service
            return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported HTTP methods. */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    /** Lists the top-level API resources. */
    private HttpResponse root(HttpRequest request) {
        return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
    }

    /** Returns the authenticated (or overridden) user and the tenants it belongs to. */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = userFrom(request)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId)));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants known to the controller. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athensDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athensDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! 
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); return new SlimeJsonResponse(toSlime(tenant.get(), request, true)); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" + 
tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString()); Environment env = Environment.from(environment); RegionName regionName = RegionName.from(region); URI elkUrl = controller.getElkUri(env, regionName, deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region)); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName); Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region)); try { ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId); Cursor costObject = response.setObject("cost"); CostJsonModelAdapter.toSlime(appCost, costObject); } catch (NotFoundCheckedException nfce) { log.log(Level.FINE, "Application cost data not found. 
" + nfce.getMessage()); } try { MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? 
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." 
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, 
instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); 
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { 
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry()); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); if (principal instanceof AthensPrincipal) { deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); } else { UserId userId = new UserId(principal.getName()); deployAuthorizer.throwIfUnauthorizedForDeploy( Environment.from(environment), userId, tenant, applicationId, optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new)); } DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant.get(), request, false)); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new 
RuntimeException("Will not happen", e); } }

/** Adds a "bcpStatus" object with the rotation status to the given cursor, for prod deployments only. */
private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) {
    if ( ! deployment.zone().environment().equals(Environment.prod)) return;

    Cursor bcpStatusObject = object.setObject("bcpStatus");
    bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
}

/** Returns the health status of the rotation endpoint matching this deployment's environment and region, or UNKNOWN. */
private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
    for (String endpoint : healthStatus.keySet()) {
        if (endpoint.contains(toDns(deployment.zone().environment().value()))
            && endpoint.contains(toDns(deployment.zone().region().value()))) {
            return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
        }
    }
    return RotationStatus.UNKNOWN;
}

/** Converts an identifier to its DNS form by replacing underscores with dashes. */
private String toDns(String id) { return id.replace('_', '-'); }

/**
 * Parses the given string as a long, returning the default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}

/** Serializes a job run (version, optional revision, timestamp) into the given cursor. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setString("version", jobRun.version().toFullString());
    jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision")));
    object.setLong("at", jobRun.at().toEpochMilli());
}

/** Reads at most 1 MB of JSON from the given stream and parses it into a Slime structure. */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    }
    catch (IOException e) {
        // Preserve the cause (the original threw a bare RuntimeException, losing all failure detail)
        throw new RuntimeException("Failed reading request data", e);
    }
}

/** @throws ForbiddenException if the requesting user is neither a super user nor a member of the given group */
private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) {
    UserId userId = authorizer.getUserId(request);
    if ( ! authorizer.isSuperUser(request) && ! authorizer.isGroupMember(userId, userGroup)) {
        throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'",
                                                   userId.id(), userGroup.id()));
    }
}
private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthensDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athens domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(tenant, slime.setObject(), request, listApplications); return slime; } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = 
object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty();

Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new);
return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null),
                                           gitRevisionFromSlime(object.field("gitRevision"))));
}

/** Deserializes a git revision (repository, branch, commit — each optional) from the given inspector. */
private GitRevision gitRevisionFromSlime(Inspector object) {
    return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null),
                           optional("branch", object).map(GitBranch::new).orElse(null),
                           optional("commit", object).map(GitCommit::new).orElse(null));
}

/** Reads the entire stream as a single string, or returns null if the stream is empty. */
private String readToString(InputStream stream) {
    // try-with-resources closes the scanner (and thereby the stream) — the original leaked both
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }
}

/** Returns whether the given version is known to this system's version status. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}

/** Returns the version given in the request body, or the current system version when none is given. */
private Version decideDeployVersion(HttpRequest request) {
    String requestVersion = readToString(request.getData());
    if (requestVersion != null)
        return new Version(requestVersion);
    else
        return controller.systemVersion();
}

/** Returns jobs for given deployment spec, in the order they are declared */
private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec deploymentSpec) {
    return deploymentSpec.steps().stream()
                         .flatMap(step -> jobsFrom(step).stream())
                         .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}

/** Returns jobs for the given step */
private List<DeploymentJobs.JobType> jobsFrom(DeploymentSpec.Step step) {
    return step.zones().stream()
               .map(zone -> DeploymentJobs.JobType.from(controller.system(), zone.environment(), zone.region().orElse(null)))
               .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}

}
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final Authorizer authorizer; public ApplicationApiHandler(Executor executor, AccessLog accessLog, Controller controller, Authorizer authorizer) { super(executor, accessLog); this.controller = controller; this.authorizer = authorizer; } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athensDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request); if 
(path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), path, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/converge")) return waitForConvergence(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}/migrateTenantToAthens")) return migrateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploy(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return 
deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/log")) return log(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return 
deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse root(HttpRequest request) { return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = userFrom(request) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().map(Tenant::getId).anyMatch(id -> id.isTenantFor(userId))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ 
private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().get())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athensDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthensDomain athensDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athensDomain.id()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse cookieFreshness(HttpRequest request) { Slime slime = new Slime(); String passThruHeader = request.getHeader(SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_HEADER_FIELD); slime.setObject().setBool("shouldRefreshCookie", ! 
SetBouncerPassthruHeaderFilter.BOUNCER_PASSTHRU_COOKIE_OK.equals(passThruHeader)); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); return new SlimeJsonResponse(toSlime(tenant.get(), request, true)); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), new Zone(Environment.from(environment), RegionName.from(region))); Deployment deployment = application.deployments().get(deploymentId.zone()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zone()); Optional<InstanceEndpoints> deploymentEndpoints = controller.applications().getDeploymentEndpoints(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor serviceUrlArray = response.setArray("serviceUrls"); if (deploymentEndpoints.isPresent()) { for (URI uri : deploymentEndpoints.get().getContainerEndpoints()) serviceUrlArray.addString(uri.toString()); } response.setString("nodes", withPath("/zone/v2/" + environment + "/" + region + "/nodes/v2/node/?&recursive=true&application=" + 
tenantName + "." + applicationName + "." + instanceName, request.getUri()).toString()); Environment env = Environment.from(environment); RegionName regionName = RegionName.from(region); URI elkUrl = controller.getElkUri(env, regionName, deploymentId); if (elkUrl != null) response.setString("elkUrl", elkUrl.toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.revision().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); Optional<Duration> deploymentTimeToLive = controller.zoneRegistry().getDeploymentTimeToLive(Environment.from(environment), RegionName.from(region)); deploymentTimeToLive.ifPresent(duration -> response.setLong("expiryTimeEpochMs", deployment.at().plus(duration).toEpochMilli())); application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.revision().source(), response); com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName); Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region)); try { ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId); Cursor costObject = response.setObject("cost"); CostJsonModelAdapter.toSlime(appCost, costObject); } catch (NotFoundCheckedException nfce) { log.log(Level.FINE, "Application cost data not found. 
" + nfce.getMessage()); } try { MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting Yamas metrics", Exceptions.toMessageString(e)); } return new SlimeJsonResponse(slime); } private void toSlime(ApplicationRevision revision, Cursor object) { object.setString("hash", revision.id()); if (revision.source().isPresent()) sourceRevisionToSlime(revision.source(), object.setObject("source")); } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId.zone().environment(), deploymentId.zone().region(), deploymentId.applicationId()); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if (!existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = authorizer.getUserId(request).toString(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endPointStatus = new EndpointStatus(status, reason, agent, timestamp); DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); try { List<String> rotations = controller.applications().setGlobalRotationStatus(deploymentId, endPointStatus); return new MessageResponse(String.format("Rotations %s successfully set to %s service", rotations.toString(), inService ? 
"in" : "out of")); } catch (IOException e) { return ErrorResponse.internalServerError("Unable to alter rotation status: " + e.getMessage()); } } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); Slime slime = new Slime(); Cursor c1 = slime.setObject().setArray("globalrotationoverride"); try { Map<String, EndpointStatus> rotations = controller.applications().getGlobalRotationStatus(deploymentId); for (String rotation : rotations.keySet()) { EndpointStatus currentStatus = rotations.get(rotation); c1.addString(rotation); Cursor c2 = c1.addObject(); c2.setString("status", currentStatus.getStatus().name()); c2.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); c2.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); c2.setLong("timestamp", currentStatus.getEpoch()); } } catch (IOException e) { return ErrorResponse.internalServerError("Unable to get rotation status: " + e.getMessage()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Set<URI> rotations = controller.getRotationUris(applicationId); if (rotations.isEmpty()) throw new NotExistsException("global rotation does not exist for '" + environment + "." 
+ region + "'"); Slime slime = new Slime(); Cursor response = slime.setObject(); Map<String, RotationStatus> rotationHealthStatus = controller.getHealthStatus(rotations.iterator().next().getHost()); for (String rotationEndpoint : rotationHealthStatus.keySet()) { if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) { Cursor bcpStatusObject = response.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", rotationHealthStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name()); } } return new SlimeJsonResponse(slime); } private HttpResponse waitForConvergence(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { return new JacksonJsonResponse(controller.waitForConfigConvergence(new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))), asLong(request.getProperty("timeout"), 1000))); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, 
instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(new Zone(Environment.from(environment), RegionName.from(region)), new com.yahoo.config.provision.ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.getConfigServerUris(Environment.from(environment), RegionName.from(region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<String> username = userFrom(request); if ( ! username.isPresent() ) throw new ForbiddenException("Not authenticated."); try { controller.tenants().createUserTenant(username.get()); return new MessageResponse("Created user '" + username.get() + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<Tenant> existingTenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
existingTenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");; Inspector requestData = toSlime(request.getData()).get(); authorizer.throwIfUnauthorized(existingTenant.get().getId(), request); Tenant updatedTenant; switch (existingTenant.get().tenantType()) { case USER: { throw new BadRequestException("Cannot set property or OpsDB user group for user tenant"); } case OPSDB: { UserGroup userGroup = new UserGroup(mandatory("userGroup", requestData).asString()); updatedTenant = Tenant.createOpsDbTenant(new TenantId(tenantName), userGroup, new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotSuperUserOrPartOfOpsDbGroup(userGroup, request); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } case ATHENS: { if (requestData.field("userGroup").valid()) throw new BadRequestException("Cannot set OpsDB user group to Athens tenant"); updatedTenant = Tenant.createAthensTenant(new TenantId(tenantName), new AthensDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); controller.tenants().updateTenant(updatedTenant, authorizer.getNToken(request)); break; } default: { throw new BadRequestException("Unknown tenant type: " + existingTenant.get().tenantType()); } } return new SlimeJsonResponse(toSlime(updatedTenant, request, true)); } private HttpResponse createTenant(String tenantName, HttpRequest request) { if (new TenantId(tenantName).isUser()) return ErrorResponse.badRequest("Use User API to create user tenants."); Inspector requestData = toSlime(request.getData()).get(); Tenant tenant = new Tenant(new TenantId(tenantName), optional("userGroup", requestData).map(UserGroup::new), optional("property", requestData).map(Property::new), optional("athensDomain", requestData).map(AthensDomain::new), 
optional("propertyId", requestData).map(PropertyId::new)); if (tenant.isOpsDbTenant()) throwIfNotSuperUserOrPartOfOpsDbGroup(new UserGroup(mandatory("userGroup", requestData).asString()), request); if (tenant.isAthensTenant()) throwIfNotAthensDomainAdmin(new AthensDomain(mandatory("athensDomain", requestData).asString()), request); controller.tenants().addTenant(tenant, authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse migrateTenant(String tenantName, HttpRequest request) { TenantId tenantid = new TenantId(tenantName); Inspector requestData = toSlime(request.getData()).get(); AthensDomain tenantDomain = new AthensDomain(mandatory("athensDomain", requestData).asString()); Property property = new Property(mandatory("property", requestData).asString()); PropertyId propertyId = new PropertyId(mandatory("propertyId", requestData).asString()); authorizer.throwIfUnauthorized(tenantid, request); throwIfNotAthensDomainAdmin(tenantDomain, request); NToken nToken = authorizer.getNToken(request) .orElseThrow(() -> new BadRequestException("The NToken for a domain admin is required to migrate tenant to Athens")); Tenant tenant = controller.tenants().migrateTenantToAthens(tenantid, tenantDomain, propertyId, property, nToken); return new SlimeJsonResponse(toSlime(tenant, request, true)); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); Application application; try { application = controller.applications().createApplication(com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"), authorizer.getNToken(request)); } catch (ZmsException e) { if (e.getCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, slime.setObject(), request); 
return new SlimeJsonResponse(slime); } /** Trigger deployment of the last built application package, on a given version */ private HttpResponse deploy(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); if (application.deploying().isPresent()) throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " + application.deploying().get() + " is in progress"); Version version = decideDeployVersion(request); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions()); controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version)); return new MessageResponse("Triggered deployment of " + application + " on version " + version); } } /** Cancel any ongoing change for given application */ private HttpResponse cancelDeploy(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); try (Lock lock = controller.applications().lock(id)) { Application application = controller.applications().require(id); Optional<Change> change = application.deploying(); if (!change.isPresent()) { return new MessageResponse("No deployment in progress for " + application + " at this time"); } controller.applications().deploymentTrigger().cancelChange(id); return new MessageResponse("Cancelled " + change.get() + " for " + application); } } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { 
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); if (request.getProperty("hostname") != null) controller.applications().restartHost(deploymentId, new Hostname(request.getProperty("hostname"))); else controller.applications().restart(deploymentId); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * This returns and deletes recent error logs from this deployment, which is used by tenant deployment jobs to verify that * the application is working. It is called for all production zones, also those in which the application is not present, * and possibly before it is present, so failures are normal and expected. */ private HttpResponse log(String tenantName, String applicationName, String instanceName, String environment, String region) { try { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), new Zone(Environment.from(environment), RegionName.from(region))); return new JacksonJsonResponse(controller.grabLog(deploymentId)); } catch (RuntimeException e) { Slime slime = new Slime(); slime.setObject(); return new SlimeJsonResponse(slime); } } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); if ( ! 
dataParts.containsKey("applicationZip")) return ErrorResponse.badRequest("Missing required form part 'applicationZip'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); DeployAuthorizer deployAuthorizer = new DeployAuthorizer(controller.athens(), controller.zoneRegistry()); Tenant tenant = controller.tenants().tenant(new TenantId(tenantName)).orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); Principal principal = authorizer.getPrincipal(request); if (principal instanceof AthensPrincipal) { deployAuthorizer.throwIfUnauthorizedForDeploy(principal, Environment.from(environment), tenant, applicationId); } else { UserId userId = new UserId(principal.getName()); deployAuthorizer.throwIfUnauthorizedForDeploy( Environment.from(environment), userId, tenant, applicationId, optional("screwdriverBuildJob", deployOptions).map(ScrewdriverId::new)); } DeployOptions deployOptionsJsonClass = new DeployOptions(screwdriverBuildJobFromSlime(deployOptions.field("screwdriverBuildJob")), optional("vespaVersion", deployOptions).map(Version::new), deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deployApplication(applicationId, zone, new ApplicationPackage(dataParts.get("applicationZip")), deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result, dataParts.get("applicationZip").length)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); authorizer.throwIfUnauthorized(new TenantId(tenantName), request); controller.tenants().deleteTenant(new TenantId(tenantName), authorizer.getNToken(request)); return new SlimeJsonResponse(toSlime(tenant.get(), request, false)); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { authorizer.throwIfUnauthorized(new TenantId(tenantName), request); com.yahoo.config.provision.ApplicationId id = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, "default"); Application deleted = controller.applications().deleteApplication(id, authorizer.getNToken(request)); if (deleted == null) return ErrorResponse.notFoundError("Could not delete application '" + id + "': Application not found"); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); Zone zone = new Zone(Environment.from(environment), RegionName.from(region)); Deployment deployment = application.deployments().get(zone); if (deployment == null) { controller.applications().deactivate(application, zone); } else { controller.applications().deactivate(application, deployment, false); } return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private Optional<String> userFrom(HttpRequest request) { return authorizer.getPrincipalIfAny(request).map(Principal::getName); } private void toSlime(Tenant tenant, Cursor object, HttpRequest request, boolean listApplications) { object.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> object.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> object.setString("property", p.id())); tenant.getPropertyId().ifPresent(p -> object.setString("propertyId", p.toString())); tenant.getUserGroup().ifPresent(g -> object.setString("userGroup", g.id())); Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(TenantName.from(tenant.getId().id()))) { if (application.id().instance().isDefault()) toSlime(application, applicationArray.addObject(), request); } } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.getId().id()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenant.tenantType().name()); tenant.getAthensDomain().ifPresent(a -> metaData.setString("athensDomain", a.id())); tenant.getProperty().ifPresent(p -> metaData.setString("property", p.id())); tenant.getUserGroup().ifPresent(g -> metaData.setString("userGroup", g.id())); object.setString("url", withPath("/application/v4/tenant/" + tenant.getId().id(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new 
RuntimeException("Will not happen", e); } } private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) { if ( ! deployment.zone().environment().equals(Environment.prod)) return; Cursor bcpStatusObject = object.setObject("bcpStatus"); bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name()); } private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) { for (String endpoint : healthStatus.keySet()) { if (endpoint.contains(toDns(deployment.zone().environment().value())) && endpoint.contains(toDns(deployment.zone().region().value()))) { return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN); } } return RotationStatus.UNKNOWN; } private String toDns(String id) { return id.replace('_', '-'); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setString("version", jobRun.version().toFullString()); jobRun.revision().ifPresent(revision -> toSlime(revision, object.setObject("revision"))); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotSuperUserOrPartOfOpsDbGroup(UserGroup userGroup, HttpRequest request) { UserId userId = authorizer.getUserId(request); if (!authorizer.isSuperUser(request) && !authorizer.isGroupMember(userId, userGroup) ) { throw new ForbiddenException(String.format("User '%s' is not super user or part of the OpsDB user group '%s'", userId.id(), userGroup.id())); } } 
private void throwIfNotAthensDomainAdmin(AthensDomain tenantDomain, HttpRequest request) { UserId userId = authorizer.getUserId(request); if ( ! authorizer.isAthensDomainAdmin(userId, tenantDomain)) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athens domain '%s'", userId.id(), tenantDomain.id())); } } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private Slime toSlime(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(tenant, slime.setObject(), request, listApplications); return slime; } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result, long applicationZipSizeBytes) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.getRevisionId().id()); object.setLong("applicationZipSize", applicationZipSizeBytes); Cursor logArray = object.setArray("prepareMessages"); if (result.getPrepareResponse().log != null) { for (Log logMessage : result.getPrepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = 
object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.getPrepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.getPrepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private Optional<ScrewdriverBuildJob> screwdriverBuildJobFromSlime(Inspector object) { if ( ! 
object.valid() ) return Optional.empty(); Optional<ScrewdriverId> screwdriverId = optional("screwdriverId", object).map(ScrewdriverId::new); return Optional.of(new ScrewdriverBuildJob(screwdriverId.orElse(null), gitRevisionFromSlime(object.field("gitRevision")))); } private GitRevision gitRevisionFromSlime(Inspector object) { return new GitRevision(optional("repository", object).map(GitRepository::new).orElse(null), optional("branch", object).map(GitBranch::new).orElse(null), optional("commit", object).map(GitCommit::new).orElse(null)); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } private Version decideDeployVersion(HttpRequest request) { String requestVersion = readToString(request.getData()); if (requestVersion != null) return new Version(requestVersion); else return controller.systemVersion(); } }
Consider rename function to reflect which builder is used
public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneDocumentStoreNumThreads(builder.background); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); }
tuneDocumentStoreNumThreads(builder.background);
public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneDocumentStoreNumThreads(builder.background); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); }
class NodeFlavorTuning implements ProtonConfig.Producer { static long MB = 1024 * 1024; static long GB = MB * 1024; private final Flavor nodeFlavor; public NodeFlavorTuning(Flavor nodeFlavor) { this.nodeFlavor = nodeFlavor; } @Override private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.size((long)nodeFlavor.getMinDiskAvailableGb() * GB); builder.hwinfo.memory.size((long)nodeFlavor.getMinMainMemoryAvailableGb() * GB); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (!nodeFlavor.hasFastDisk()) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = nodeFlavor.getMinMainMemoryAvailableGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneDocumentStoreNumThreads(ProtonConfig.Background.Builder builder) { builder.threads(max(8, (int)nodeFlavor.getMinCpuCores()/2)); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((nodeFlavor.getMinDiskAvailableGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } }
class NodeFlavorTuning implements ProtonConfig.Producer { static long MB = 1024 * 1024; static long GB = MB * 1024; private final Flavor nodeFlavor; public NodeFlavorTuning(Flavor nodeFlavor) { this.nodeFlavor = nodeFlavor; } @Override private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.size((long)nodeFlavor.getMinDiskAvailableGb() * GB); builder.hwinfo.memory.size((long)nodeFlavor.getMinMainMemoryAvailableGb() * GB); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (!nodeFlavor.hasFastDisk()) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = nodeFlavor.getMinMainMemoryAvailableGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneDocumentStoreNumThreads(ProtonConfig.Background.Builder builder) { builder.threads(max(8, (int)nodeFlavor.getMinCpuCores()/2)); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((nodeFlavor.getMinDiskAvailableGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } }
Can the `isLastBeforeProduction` method be removed then?
public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } }
(application.deploying().get() instanceof Change.VersionChange)))
public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! ( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** * Find jobs that can and should run but is currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(clock.instant().minus(jobTimeout))) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs(), jobTimeout); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().inProgress()) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs, Duration timeout) { Instant startOfGracePeriod = controller.clock().instant().minus(timeout); Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(JobStatus::inProgress) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(startOfGracePeriod)); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.inProgress()) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! 
outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, clock.instant().minus(jobTimeout))) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(clock.instant().minus(jobTimeout))) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs(), jobTimeout); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().inProgress()) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs, Duration timeout) { Instant startOfGracePeriod = controller.clock().instant().minus(timeout); Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(JobStatus::inProgress) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(startOfGracePeriod)); } /** Decide whether the job should be triggered by the periodic trigger */ private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.inProgress()) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; } /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! 
outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, clock.instant().minus(jobTimeout))) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Any practical reason for doing this change? This may not work for large rulesets where the joined commands exceed the kernels maximum allowed command length (`ARG_MAX`).
private void applyAcl(ContainerName containerName, Acl acl) { if (isAclActive(containerName, acl)) { return; } final Command flush = new FlushCommand(Chain.INPUT); final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT); try { String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream()) .map(command -> command.asString(IPTABLES_COMMAND)) .collect(Collectors.joining("; ")); log.debug("Running ACL command '" + commands + "' in " + containerName.asString()); dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands); containerAcls.put(containerName, acl); } catch (Exception e) { log.error("Exception occurred while configuring ACLs for " + containerName.asString() + ", attempting rollback", e); try { dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND)); } catch (Exception ne) { log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne); } } }
dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
private void applyAcl(ContainerName containerName, Acl acl) { if (isAclActive(containerName, acl)) { return; } final Command flush = new FlushCommand(Chain.INPUT); final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT); try { String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream()) .map(command -> command.asString(IPTABLES_COMMAND)) .collect(Collectors.joining("; ")); log.debug("Running ACL command '" + commands + "' in " + containerName.asString()); dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands); containerAcls.put(containerName, acl); } catch (Exception e) { log.error("Exception occurred while configuring ACLs for " + containerName.asString() + ", attempting rollback", e); try { dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND)); } catch (Exception ne) { log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne); } } }
class AclMaintainer implements Runnable { private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class); private static final String IPTABLES_COMMAND = "ip6tables"; private final DockerOperations dockerOperations; private final NodeRepository nodeRepository; private final String nodeAdminHostname; private final Map<ContainerName, Acl> containerAcls; public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository, String nodeAdminHostname) { this.dockerOperations = dockerOperations; this.nodeRepository = nodeRepository; this.nodeAdminHostname = nodeAdminHostname; this.containerAcls = new HashMap<>(); } private boolean isAclActive(ContainerName containerName, Acl acl) { return Optional.ofNullable(containerAcls.get(containerName)) .map(acl::equals) .orElse(false); } private synchronized void configureAcls() { final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository .getContainerAclSpecs(nodeAdminHostname).stream() .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy)); dockerOperations .getAllManagedContainers().stream() .filter(container -> container.state.isRunning()) .map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name))) .filter(pair -> pair.getSecond() != null) .forEach(pair -> applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond()))); } @Override public void run() { try { configureAcls(); } catch (Throwable t) { log.error("Failed to configure ACLs", t); } } }
class AclMaintainer implements Runnable { private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class); private static final String IPTABLES_COMMAND = "ip6tables"; private final DockerOperations dockerOperations; private final NodeRepository nodeRepository; private final String nodeAdminHostname; private final Map<ContainerName, Acl> containerAcls; public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository, String nodeAdminHostname) { this.dockerOperations = dockerOperations; this.nodeRepository = nodeRepository; this.nodeAdminHostname = nodeAdminHostname; this.containerAcls = new HashMap<>(); } private boolean isAclActive(ContainerName containerName, Acl acl) { return Optional.ofNullable(containerAcls.get(containerName)) .map(acl::equals) .orElse(false); } private synchronized void configureAcls() { final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository .getContainerAclSpecs(nodeAdminHostname).stream() .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy)); dockerOperations .getAllManagedContainers().stream() .filter(container -> container.state.isRunning()) .map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name))) .filter(pair -> pair.getSecond() != null) .forEach(pair -> applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond()))); } @Override public void run() { try { configureAcls(); } catch (Throwable t) { log.error("Failed to configure ACLs", t); } } }
You could write the commands to a file and then execute that as a script.
private void applyAcl(ContainerName containerName, Acl acl) { if (isAclActive(containerName, acl)) { return; } final Command flush = new FlushCommand(Chain.INPUT); final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT); try { String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream()) .map(command -> command.asString(IPTABLES_COMMAND)) .collect(Collectors.joining("; ")); log.debug("Running ACL command '" + commands + "' in " + containerName.asString()); dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands); containerAcls.put(containerName, acl); } catch (Exception e) { log.error("Exception occurred while configuring ACLs for " + containerName.asString() + ", attempting rollback", e); try { dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND)); } catch (Exception ne) { log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne); } } }
dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
private void applyAcl(ContainerName containerName, Acl acl) { if (isAclActive(containerName, acl)) { return; } final Command flush = new FlushCommand(Chain.INPUT); final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT); try { String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream()) .map(command -> command.asString(IPTABLES_COMMAND)) .collect(Collectors.joining("; ")); log.debug("Running ACL command '" + commands + "' in " + containerName.asString()); dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands); containerAcls.put(containerName, acl); } catch (Exception e) { log.error("Exception occurred while configuring ACLs for " + containerName.asString() + ", attempting rollback", e); try { dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND)); } catch (Exception ne) { log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne); } } }
class AclMaintainer implements Runnable { private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class); private static final String IPTABLES_COMMAND = "ip6tables"; private final DockerOperations dockerOperations; private final NodeRepository nodeRepository; private final String nodeAdminHostname; private final Map<ContainerName, Acl> containerAcls; public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository, String nodeAdminHostname) { this.dockerOperations = dockerOperations; this.nodeRepository = nodeRepository; this.nodeAdminHostname = nodeAdminHostname; this.containerAcls = new HashMap<>(); } private boolean isAclActive(ContainerName containerName, Acl acl) { return Optional.ofNullable(containerAcls.get(containerName)) .map(acl::equals) .orElse(false); } private synchronized void configureAcls() { final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository .getContainerAclSpecs(nodeAdminHostname).stream() .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy)); dockerOperations .getAllManagedContainers().stream() .filter(container -> container.state.isRunning()) .map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name))) .filter(pair -> pair.getSecond() != null) .forEach(pair -> applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond()))); } @Override public void run() { try { configureAcls(); } catch (Throwable t) { log.error("Failed to configure ACLs", t); } } }
/**
 * Periodically fetches container ACL specs from the node repository and applies them,
 * via ip6tables, to every running container managed by this node admin.
 */
class AclMaintainer implements Runnable {

    private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
    private static final String IPTABLES_COMMAND = "ip6tables";

    private final DockerOperations dockerOperations;
    private final NodeRepository nodeRepository;
    private final String nodeAdminHostname;
    // Last ACL successfully applied per container, used to skip redundant reconfiguration.
    private final Map<ContainerName, Acl> containerAcls;

    public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                         String nodeAdminHostname) {
        this.dockerOperations = dockerOperations;
        this.nodeRepository = nodeRepository;
        this.nodeAdminHostname = nodeAdminHostname;
        this.containerAcls = new HashMap<>();
    }

    /** Returns whether the given ACL is already the one last applied to this container. */
    private boolean isAclActive(ContainerName containerName, Acl acl) {
        Acl current = containerAcls.get(containerName);
        return current != null && acl.equals(current);
    }

    /** Applies the ACL specs from the node repository to all running managed containers. */
    private synchronized void configureAcls() {
        Map<ContainerName, List<ContainerAclSpec>> specsByContainer = nodeRepository
                .getContainerAclSpecs(nodeAdminHostname).stream()
                .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));

        dockerOperations.getAllManagedContainers().stream()
                .filter(container -> container.state.isRunning())
                .filter(container -> specsByContainer.containsKey(container.name))
                .forEach(container -> applyAcl(container.name,
                                               new Acl(container.pid, specsByContainer.get(container.name))));
    }

    @Override
    public void run() {
        try {
            configureAcls();
        } catch (Throwable throwable) {
            // Runs on a scheduler thread; never let an exception kill the maintainer.
            log.error("Failed to configure ACLs", throwable);
        }
    }
}
In short, the goal is to reduce load on `node-admin` and `docker`/the kernel. Every time there is a burst of commands, there is an increased chance of something hanging indefinitely; an ACL setup for a single node currently executes 8+ commands, and this change reduces it to a single execution. As for the size, I don't think that will be a problem: * 246 bytes initial * 38 bytes + length of the IPv6 address, per IP * 28 bytes for the final `REJECT`. Even if we send every IPv6 address without the "::" shorthand (39 bytes per address), we would need less than 8 KB for the entire command with 100 addresses in the ACL — far below the default `ARG_MAX` of 2097152 bytes.
/**
 * Applies the given ACL inside the container's network namespace, unless it is already active.
 * All iptables commands are joined into a single shell invocation so the namespace is entered
 * only once. On failure, rolls back by setting the INPUT policy to ACCEPT (best effort).
 */
private void applyAcl(ContainerName containerName, Acl acl) {
    if (isAclActive(containerName, acl)) {
        return; // nothing to do, this exact ACL is already in effect
    }
    final Command flush = new FlushCommand(Chain.INPUT);
    final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
    try {
        // Join with '&&' so the chain aborts on the first failing command. With ';' the shell
        // would report only the last command's exit status, silently ignoring earlier failures
        // and caching a partially applied ACL as active.
        String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
                .map(command -> command.asString(IPTABLES_COMMAND))
                .collect(Collectors.joining(" && "));

        log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
        dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
        containerAcls.put(containerName, acl);
    } catch (Exception e) {
        log.error("Exception occurred while configuring ACLs for " + containerName.asString()
                + ", attempting rollback", e);
        try {
            // Open the INPUT chain so the container is not left unreachable by a half-applied ACL.
            dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
        } catch (Exception ne) {
            log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
        }
    }
}
// Run all joined iptables commands in one shell so the container's network namespace is entered only once.
dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
/**
 * Applies the given ACL inside the container's network namespace, unless it is already active.
 * All iptables commands are joined into a single shell invocation so the namespace is entered
 * only once. On failure, rolls back by setting the INPUT policy to ACCEPT (best effort).
 */
private void applyAcl(ContainerName containerName, Acl acl) {
    if (isAclActive(containerName, acl)) {
        return; // nothing to do, this exact ACL is already in effect
    }
    final Command flush = new FlushCommand(Chain.INPUT);
    final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
    try {
        // Join with '&&' so the chain aborts on the first failing command. With ';' the shell
        // would report only the last command's exit status, silently ignoring earlier failures
        // and caching a partially applied ACL as active.
        String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
                .map(command -> command.asString(IPTABLES_COMMAND))
                .collect(Collectors.joining(" && "));

        log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
        dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
        containerAcls.put(containerName, acl);
    } catch (Exception e) {
        log.error("Exception occurred while configuring ACLs for " + containerName.asString()
                + ", attempting rollback", e);
        try {
            // Open the INPUT chain so the container is not left unreachable by a half-applied ACL.
            dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
        } catch (Exception ne) {
            log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
        }
    }
}
/**
 * Periodically fetches container ACL specs from the node repository and applies them,
 * via ip6tables, to every running container managed by this node admin.
 */
class AclMaintainer implements Runnable {

    private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
    private static final String IPTABLES_COMMAND = "ip6tables";

    private final DockerOperations dockerOperations;
    private final NodeRepository nodeRepository;
    private final String nodeAdminHostname;
    // Last ACL successfully applied per container, used to skip redundant reconfiguration.
    private final Map<ContainerName, Acl> containerAcls;

    public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                         String nodeAdminHostname) {
        this.dockerOperations = dockerOperations;
        this.nodeRepository = nodeRepository;
        this.nodeAdminHostname = nodeAdminHostname;
        this.containerAcls = new HashMap<>();
    }

    /** Returns whether the given ACL is already the one last applied to this container. */
    private boolean isAclActive(ContainerName containerName, Acl acl) {
        Acl current = containerAcls.get(containerName);
        return current != null && acl.equals(current);
    }

    /** Applies the ACL specs from the node repository to all running managed containers. */
    private synchronized void configureAcls() {
        Map<ContainerName, List<ContainerAclSpec>> specsByContainer = nodeRepository
                .getContainerAclSpecs(nodeAdminHostname).stream()
                .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));

        dockerOperations.getAllManagedContainers().stream()
                .filter(container -> container.state.isRunning())
                .filter(container -> specsByContainer.containsKey(container.name))
                .forEach(container -> applyAcl(container.name,
                                               new Acl(container.pid, specsByContainer.get(container.name))));
    }

    @Override
    public void run() {
        try {
            configureAcls();
        } catch (Throwable throwable) {
            // Runs on a scheduler thread; never let an exception kill the maintainer.
            log.error("Failed to configure ACLs", throwable);
        }
    }
}
/**
 * Periodically fetches container ACL specs from the node repository and applies them,
 * via ip6tables, to every running container managed by this node admin.
 */
class AclMaintainer implements Runnable {

    private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
    private static final String IPTABLES_COMMAND = "ip6tables";

    private final DockerOperations dockerOperations;
    private final NodeRepository nodeRepository;
    private final String nodeAdminHostname;
    // Last ACL successfully applied per container, used to skip redundant reconfiguration.
    private final Map<ContainerName, Acl> containerAcls;

    public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                         String nodeAdminHostname) {
        this.dockerOperations = dockerOperations;
        this.nodeRepository = nodeRepository;
        this.nodeAdminHostname = nodeAdminHostname;
        this.containerAcls = new HashMap<>();
    }

    /** Returns whether the given ACL is already the one last applied to this container. */
    private boolean isAclActive(ContainerName containerName, Acl acl) {
        Acl current = containerAcls.get(containerName);
        return current != null && acl.equals(current);
    }

    /** Applies the ACL specs from the node repository to all running managed containers. */
    private synchronized void configureAcls() {
        Map<ContainerName, List<ContainerAclSpec>> specsByContainer = nodeRepository
                .getContainerAclSpecs(nodeAdminHostname).stream()
                .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));

        dockerOperations.getAllManagedContainers().stream()
                .filter(container -> container.state.isRunning())
                .filter(container -> specsByContainer.containsKey(container.name))
                .forEach(container -> applyAcl(container.name,
                                               new Acl(container.pid, specsByContainer.get(container.name))));
    }

    @Override
    public void run() {
        try {
            configureAcls();
        } catch (Throwable throwable) {
            // Runs on a scheduler thread; never let an exception kill the maintainer.
            log.error("Failed to configure ACLs", throwable);
        }
    }
}
Correct.
// Applies restrict-based optimization to each child of a composite item and returns what the
// composite as a whole is now known to recall for the restricted document type.
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
    Recall recall = Recall.UNKNOWN_RECALL;
    // Iterate backwards so children can be removed by index without shifting unvisited elements.
    for (int i = item.getItemCount(); --i >= 0; ) {
        switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
        case RECALLS_EVERYTHING:
            if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                // An always-true disjunct makes the whole OR/EQUIV recall everything.
                recall = Recall.RECALLS_EVERYTHING;
            } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                // An always-true conjunct is redundant in AND/NEAR and can be dropped.
                item.removeItem(i);
            } else if (item instanceof RankItem) {
                // Intentionally kept: the child does not change recall but may still affect ranking.
            } else {
                throw new UnsupportedOperationException(item.getClass().getName());
            }
            break;
        case RECALLS_NOTHING:
            if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                // An always-false disjunct contributes nothing and is removed.
                item.removeItem(i);
            } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                // An always-false conjunct makes the whole AND/NEAR recall nothing.
                return Recall.RECALLS_NOTHING;
            } else if (item instanceof RankItem) {
                item.removeItem(i);
            } else {
                throw new UnsupportedOperationException(item.getClass().getName());
            }
            break;
        }
    }
    return recall;
}
// An always-true disjunct makes the whole OR/EQUIV composite recall everything.
recall = Recall.RECALLS_EVERYTHING;
// Applies restrict-based optimization to each child of a composite item and returns what the
// composite as a whole is now known to recall for the restricted document type.
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
    Recall recall = Recall.UNKNOWN_RECALL;
    // Iterate backwards so children can be removed by index without shifting unvisited elements.
    for (int i = item.getItemCount(); --i >= 0; ) {
        switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
        case RECALLS_EVERYTHING:
            if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                // This child matches everything, so unranked siblings are redundant for recall.
                removeOtherNonrankedChildren(item, i);
                recall = Recall.RECALLS_EVERYTHING;
            } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                // An always-true conjunct is redundant in AND/NEAR and can be dropped.
                item.removeItem(i);
            } else if (item instanceof RankItem) {
                // Intentionally kept: the child does not change recall but may still affect ranking.
            } else {
                throw new UnsupportedOperationException(item.getClass().getName());
            }
            break;
        case RECALLS_NOTHING:
            if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                // An always-false disjunct contributes nothing and is removed.
                item.removeItem(i);
            } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                // An always-false conjunct makes the whole AND/NEAR recall nothing.
                return Recall.RECALLS_NOTHING;
            } else if (item instanceof RankItem) {
                item.removeItem(i);
            } else {
                throw new UnsupportedOperationException(item.getClass().getName());
            }
            break;
        }
    }
    return recall;
}
/** Static utilities for rewriting and optimizing query trees before dispatch. */
class QueryRewrite {

    // Tri-state result of restrict-based analysis of a query item.
    private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }

    /**
     * Optimize multiple NotItems under an AND by collapsing them into one: the positive parts
     * are left behind in place, and the surviving NotItem takes the original AND as its positive
     * item and the union of all the original NotItems' negative items as its negative items.
     */
    public static void optimizeAndNot(Query query) {
        Item root = query.getModel().getQueryTree().getRoot();
        Item possibleNewRoot = optimizeAndNot(root);
        if (root != possibleNewRoot) {
            query.getModel().getQueryTree().setRoot(possibleNewRoot);
        }
    }

    /**
     * Optimizes the given query tree based on the model's restricted document type.
     * If the analysis proves the query recalls nothing, the tree is replaced by a NullItem.
     */
    public static void optimizeByRestrict(Query query) {
        if (query.getModel().getRestrict().size() != 1) {
            return; // only applicable when restricted to exactly one document type
        }
        Item root = query.getModel().getQueryTree().getRoot();
        if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
            query.getModel().getQueryTree().setRoot(new NullItem());
        }
    }

    /** Collapses all single-child CompositeItems into their parent item. */
    public static void collapseSingleComposites(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = collapseSingleComposites(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    /** Replaces items searching the sddocname field by the form appropriate for the search node. */
    public static void rewriteSddocname(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = rewriteSddocname(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    // Only composite nodes can contain NotItems to collapse.
    private static Item optimizeAndNot(Item node) {
        if (node instanceof CompositeItem) {
            return extractAndNotRecursively((CompositeItem) node);
        }
        return node;
    }

    // Depth-first: rewrite children first, then collapse NotItems at this level if the parent is an AND.
    private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            Item possibleNewChild = optimizeAndNot(child);
            if (child != possibleNewChild) {
                parent.setItem(i, possibleNewChild);
            }
        }
        if (parent instanceof AndItem) {
            return extractAndNot((AndItem) parent);
        }
        return parent;
    }

    // Collapses all NotItem children of the AND into a single NotItem; see optimizeAndNot(Query).
    private static CompositeItem extractAndNot(AndItem parent) {
        NotItem theOnlyNot = null;
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            if (child instanceof NotItem) {
                NotItem thisNot = (NotItem) child;
                // Replace the NotItem in the AND by its positive part ...
                parent.setItem(i, thisNot.getPositiveItem());
                if (theOnlyNot == null) {
                    // ... and let the first NotItem encountered wrap the whole AND.
                    theOnlyNot = thisNot;
                    theOnlyNot.setPositiveItem(parent);
                } else {
                    // Later NotItems donate their negative items (index 0 is the positive item).
                    for (int j=1; j < thisNot.getItemCount(); j++) {
                        theOnlyNot.addNegativeItem(thisNot.getItem(j));
                    }
                }
            }
        }
        return (theOnlyNot != null) ? theOnlyNot : parent;
    }

    // Dispatches restrict analysis on item type; unknown item types yield UNKNOWN_RECALL.
    private static Recall optimizeByRestrict(Item item, String restrictParam) {
        if (item instanceof SimpleIndexedItem) {
            return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
        } else if (item instanceof NotItem) {
            return optimizeNotItemByRestrict((NotItem)item, restrictParam);
        } else if (item instanceof CompositeItem) {
            return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
        } else {
            return Recall.UNKNOWN_RECALL;
        }
    }

    // A term on the sddocname field either matches the restricted type (recalls everything)
    // or some other type (recalls nothing); terms on other fields tell us nothing.
    private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
        if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
            return Recall.UNKNOWN_RECALL;
        }
        return restrictParam.equals(item.getIndexedString()) ? Recall.RECALLS_EVERYTHING : Recall.RECALLS_NOTHING;
    }

    // A NOT recalls nothing if its positive item does, or if any negative item recalls everything;
    // negative items proven to recall nothing are pruned.
    private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
        if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
            return Recall.RECALLS_NOTHING;
        }
        for (int i = item.getItemCount(); --i >= 1; ) {
            Item child = item.getItem(i);
            switch (optimizeByRestrict(child, restrictParam)) {
            case RECALLS_EVERYTHING:
                return Recall.RECALLS_NOTHING;
            case RECALLS_NOTHING:
                item.removeItem(i);
                break;
            }
        }
        return Recall.UNKNOWN_RECALL;
    }

    // Recursively replaces composites having exactly one child by that child.
    private static Item collapseSingleComposites(Item item) {
        if (!(item instanceof CompositeItem)) {
            return item;
        }
        CompositeItem parent = (CompositeItem)item;
        int numChildren = parent.getItemCount();
        for (int i = 0; i < numChildren; ++i) {
            Item oldChild = parent.getItem(i);
            Item newChild = collapseSingleComposites(oldChild);
            if (oldChild != newChild) {
                parent.setItem(i, newChild);
            }
        }
        return numChildren == 1 ? parent.getItem(0) : item;
    }

    // Rewrites sddocname terms into substring matches on the [documentmetastore] index.
    private static Item rewriteSddocname(Item item) {
        if (item instanceof CompositeItem) {
            CompositeItem parent = (CompositeItem)item;
            for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
                Item oldChild = parent.getItem(i);
                Item newChild = rewriteSddocname(oldChild);
                if (oldChild != newChild) {
                    parent.setItem(i, newChild);
                }
            }
        } else if (item instanceof SimpleIndexedItem) {
            SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
            if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
                SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
                newItem.setIndexName("[documentmetastore]");
                return newItem;
            }
        }
        return item;
    }
}
/** Static utilities for rewriting and optimizing query trees before dispatch. */
class QueryRewrite {

    // Tri-state result of restrict-based analysis of a query item.
    private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }

    /**
     * Optimize multiple NotItems under an AND by collapsing them into one: the positive parts
     * are left behind in place, and the surviving NotItem takes the original AND as its positive
     * item and the union of all the original NotItems' negative items as its negative items.
     */
    public static void optimizeAndNot(Query query) {
        Item root = query.getModel().getQueryTree().getRoot();
        Item possibleNewRoot = optimizeAndNot(root);
        if (root != possibleNewRoot) {
            query.getModel().getQueryTree().setRoot(possibleNewRoot);
        }
    }

    /**
     * Optimizes the given query tree based on the model's restricted document type.
     * If the analysis proves the query recalls nothing, the tree is replaced by a NullItem.
     */
    public static void optimizeByRestrict(Query query) {
        if (query.getModel().getRestrict().size() != 1) {
            return; // only applicable when restricted to exactly one document type
        }
        Item root = query.getModel().getQueryTree().getRoot();
        if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
            query.getModel().getQueryTree().setRoot(new NullItem());
        }
    }

    /** Collapses all single-child CompositeItems into their parent item. */
    public static void collapseSingleComposites(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = collapseSingleComposites(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    /** Replaces items searching the sddocname field by the form appropriate for the search node. */
    public static void rewriteSddocname(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = rewriteSddocname(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    // Only composite nodes can contain NotItems to collapse.
    private static Item optimizeAndNot(Item node) {
        if (node instanceof CompositeItem) {
            return extractAndNotRecursively((CompositeItem) node);
        }
        return node;
    }

    // Depth-first: rewrite children first, then collapse NotItems at this level if the parent is an AND.
    private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            Item possibleNewChild = optimizeAndNot(child);
            if (child != possibleNewChild) {
                parent.setItem(i, possibleNewChild);
            }
        }
        if (parent instanceof AndItem) {
            return extractAndNot((AndItem) parent);
        }
        return parent;
    }

    // Collapses all NotItem children of the AND into a single NotItem; see optimizeAndNot(Query).
    private static CompositeItem extractAndNot(AndItem parent) {
        NotItem theOnlyNot = null;
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            if (child instanceof NotItem) {
                NotItem thisNot = (NotItem) child;
                // Replace the NotItem in the AND by its positive part ...
                parent.setItem(i, thisNot.getPositiveItem());
                if (theOnlyNot == null) {
                    // ... and let the first NotItem encountered wrap the whole AND.
                    theOnlyNot = thisNot;
                    theOnlyNot.setPositiveItem(parent);
                } else {
                    // Later NotItems donate their negative items (index 0 is the positive item).
                    for (int j=1; j < thisNot.getItemCount(); j++) {
                        theOnlyNot.addNegativeItem(thisNot.getItem(j));
                    }
                }
            }
        }
        return (theOnlyNot != null) ? theOnlyNot : parent;
    }

    // Dispatches restrict analysis on item type; unknown item types yield UNKNOWN_RECALL.
    private static Recall optimizeByRestrict(Item item, String restrictParam) {
        if (item instanceof SimpleIndexedItem) {
            return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
        } else if (item instanceof NotItem) {
            return optimizeNotItemByRestrict((NotItem)item, restrictParam);
        } else if (item instanceof CompositeItem) {
            return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
        } else {
            return Recall.UNKNOWN_RECALL;
        }
    }

    // A term on the sddocname field either matches the restricted type (recalls everything)
    // or some other type (recalls nothing); terms on other fields tell us nothing.
    private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
        if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
            return Recall.UNKNOWN_RECALL;
        }
        return restrictParam.equals(item.getIndexedString()) ? Recall.RECALLS_EVERYTHING : Recall.RECALLS_NOTHING;
    }

    // A NOT recalls nothing if its positive item does, or if any negative item recalls everything;
    // negative items proven to recall nothing are pruned.
    private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
        if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
            return Recall.RECALLS_NOTHING;
        }
        for (int i = item.getItemCount(); --i >= 1; ) {
            Item child = item.getItem(i);
            switch (optimizeByRestrict(child, restrictParam)) {
            case RECALLS_EVERYTHING:
                return Recall.RECALLS_NOTHING;
            case RECALLS_NOTHING:
                item.removeItem(i);
                break;
            }
        }
        return Recall.UNKNOWN_RECALL;
    }

    // Removes every unranked sibling of the child at the given index; used when that child has
    // been proven to recall everything, making unranked siblings redundant for recall.
    private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
        Item childToKeep = parent.getItem(indexOfChildToKeep);
        for (int i = parent.getItemCount(); --i >= 0; ) {
            Item child = parent.getItem(i);
            if ( child != childToKeep && ! parent.getItem(i).isRanked())
                parent.removeItem(i);
        }
    }

    // Recursively replaces composites having exactly one child by that child.
    private static Item collapseSingleComposites(Item item) {
        if (!(item instanceof CompositeItem)) {
            return item;
        }
        CompositeItem parent = (CompositeItem)item;
        int numChildren = parent.getItemCount();
        for (int i = 0; i < numChildren; ++i) {
            Item oldChild = parent.getItem(i);
            Item newChild = collapseSingleComposites(oldChild);
            if (oldChild != newChild) {
                parent.setItem(i, newChild);
            }
        }
        return numChildren == 1 ? parent.getItem(0) : item;
    }

    // Rewrites sddocname terms into substring matches on the [documentmetastore] index.
    private static Item rewriteSddocname(Item item) {
        if (item instanceof CompositeItem) {
            CompositeItem parent = (CompositeItem)item;
            for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
                Item oldChild = parent.getItem(i);
                Item newChild = rewriteSddocname(oldChild);
                if (oldChild != newChild) {
                    parent.setItem(i, newChild);
                }
            }
        } else if (item instanceof SimpleIndexedItem) {
            SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
            if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
                SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
                newItem.setIndexName("[documentmetastore]");
                return newItem;
            }
        }
        return item;
    }
}
Why is there an increased chance of something hanging? This approach will execute just as many `iptables` commands, just inside another shell. To reduce the number of commands you would have to use `ipset` instead. The only difference I see here is that `nsenter` will be executed once since there's only one call to `executeCommandInNetworkNamespace`, but the burst will be the same.
/**
 * Applies the given ACL inside the container's network namespace, unless it is already active.
 * All iptables commands are joined into a single shell invocation so the namespace is entered
 * only once. On failure, rolls back by setting the INPUT policy to ACCEPT (best effort).
 */
private void applyAcl(ContainerName containerName, Acl acl) {
    if (isAclActive(containerName, acl)) {
        return; // nothing to do, this exact ACL is already in effect
    }
    final Command flush = new FlushCommand(Chain.INPUT);
    final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
    try {
        // Join with '&&' so the chain aborts on the first failing command. With ';' the shell
        // would report only the last command's exit status, silently ignoring earlier failures
        // and caching a partially applied ACL as active.
        String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
                .map(command -> command.asString(IPTABLES_COMMAND))
                .collect(Collectors.joining(" && "));

        log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
        dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
        containerAcls.put(containerName, acl);
    } catch (Exception e) {
        log.error("Exception occurred while configuring ACLs for " + containerName.asString()
                + ", attempting rollback", e);
        try {
            // Open the INPUT chain so the container is not left unreachable by a half-applied ACL.
            dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
        } catch (Exception ne) {
            log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
        }
    }
}
// Run all joined iptables commands in one shell so the container's network namespace is entered only once.
dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
/**
 * Applies the given ACL inside the container's network namespace, unless it is already active.
 * All iptables commands are joined into a single shell invocation so the namespace is entered
 * only once. On failure, rolls back by setting the INPUT policy to ACCEPT (best effort).
 */
private void applyAcl(ContainerName containerName, Acl acl) {
    if (isAclActive(containerName, acl)) {
        return; // nothing to do, this exact ACL is already in effect
    }
    final Command flush = new FlushCommand(Chain.INPUT);
    final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
    try {
        // Join with '&&' so the chain aborts on the first failing command. With ';' the shell
        // would report only the last command's exit status, silently ignoring earlier failures
        // and caching a partially applied ACL as active.
        String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
                .map(command -> command.asString(IPTABLES_COMMAND))
                .collect(Collectors.joining(" && "));

        log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
        dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
        containerAcls.put(containerName, acl);
    } catch (Exception e) {
        log.error("Exception occurred while configuring ACLs for " + containerName.asString()
                + ", attempting rollback", e);
        try {
            // Open the INPUT chain so the container is not left unreachable by a half-applied ACL.
            dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
        } catch (Exception ne) {
            log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
        }
    }
}
/**
 * Periodically fetches container ACL specs from the node repository and applies them,
 * via ip6tables, to every running container managed by this node admin.
 */
class AclMaintainer implements Runnable {

    private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
    private static final String IPTABLES_COMMAND = "ip6tables";

    private final DockerOperations dockerOperations;
    private final NodeRepository nodeRepository;
    private final String nodeAdminHostname;
    // Last ACL successfully applied per container, used to skip redundant reconfiguration.
    private final Map<ContainerName, Acl> containerAcls;

    public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                         String nodeAdminHostname) {
        this.dockerOperations = dockerOperations;
        this.nodeRepository = nodeRepository;
        this.nodeAdminHostname = nodeAdminHostname;
        this.containerAcls = new HashMap<>();
    }

    /** Returns whether the given ACL is already the one last applied to this container. */
    private boolean isAclActive(ContainerName containerName, Acl acl) {
        Acl current = containerAcls.get(containerName);
        return current != null && acl.equals(current);
    }

    /** Applies the ACL specs from the node repository to all running managed containers. */
    private synchronized void configureAcls() {
        Map<ContainerName, List<ContainerAclSpec>> specsByContainer = nodeRepository
                .getContainerAclSpecs(nodeAdminHostname).stream()
                .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));

        dockerOperations.getAllManagedContainers().stream()
                .filter(container -> container.state.isRunning())
                .filter(container -> specsByContainer.containsKey(container.name))
                .forEach(container -> applyAcl(container.name,
                                               new Acl(container.pid, specsByContainer.get(container.name))));
    }

    @Override
    public void run() {
        try {
            configureAcls();
        } catch (Throwable throwable) {
            // Runs on a scheduler thread; never let an exception kill the maintainer.
            log.error("Failed to configure ACLs", throwable);
        }
    }
}
/**
 * Periodically fetches container ACL specs from the node repository and applies them,
 * via ip6tables, to every running container managed by this node admin.
 */
class AclMaintainer implements Runnable {

    private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
    private static final String IPTABLES_COMMAND = "ip6tables";

    private final DockerOperations dockerOperations;
    private final NodeRepository nodeRepository;
    private final String nodeAdminHostname;
    // Last ACL successfully applied per container, used to skip redundant reconfiguration.
    private final Map<ContainerName, Acl> containerAcls;

    public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                         String nodeAdminHostname) {
        this.dockerOperations = dockerOperations;
        this.nodeRepository = nodeRepository;
        this.nodeAdminHostname = nodeAdminHostname;
        this.containerAcls = new HashMap<>();
    }

    /** Returns whether the given ACL is already the one last applied to this container. */
    private boolean isAclActive(ContainerName containerName, Acl acl) {
        Acl current = containerAcls.get(containerName);
        return current != null && acl.equals(current);
    }

    /** Applies the ACL specs from the node repository to all running managed containers. */
    private synchronized void configureAcls() {
        Map<ContainerName, List<ContainerAclSpec>> specsByContainer = nodeRepository
                .getContainerAclSpecs(nodeAdminHostname).stream()
                .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));

        dockerOperations.getAllManagedContainers().stream()
                .filter(container -> container.state.isRunning())
                .filter(container -> specsByContainer.containsKey(container.name))
                .forEach(container -> applyAcl(container.name,
                                               new Acl(container.pid, specsByContainer.get(container.name))));
    }

    @Override
    public void run() {
        try {
            configureAcls();
        } catch (Throwable throwable) {
            // Runs on a scheduler thread; never let an exception kill the maintainer.
            log.error("Failed to configure ACLs", throwable);
        }
    }
}
Yes, I should have been clearer: the goal is to reduce the number of calls that enter the container namespace, such as `docker exec` and `nsenter`. They become unreliable under high load and can fail randomly. Additionally, the container can go down at any time, and calling once instead of multiple times reduces the number of errors caused by the container going down in the middle of the chain of commands.
/**
 * Applies the given ACL inside the container's network namespace, unless it is already active.
 * All iptables commands are joined into a single shell invocation so the namespace is entered
 * only once. On failure, rolls back by setting the INPUT policy to ACCEPT (best effort).
 */
private void applyAcl(ContainerName containerName, Acl acl) {
    if (isAclActive(containerName, acl)) {
        return; // nothing to do, this exact ACL is already in effect
    }
    final Command flush = new FlushCommand(Chain.INPUT);
    final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
    try {
        // Join with '&&' so the chain aborts on the first failing command. With ';' the shell
        // would report only the last command's exit status, silently ignoring earlier failures
        // and caching a partially applied ACL as active.
        String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
                .map(command -> command.asString(IPTABLES_COMMAND))
                .collect(Collectors.joining(" && "));

        log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
        dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
        containerAcls.put(containerName, acl);
    } catch (Exception e) {
        log.error("Exception occurred while configuring ACLs for " + containerName.asString()
                + ", attempting rollback", e);
        try {
            // Open the INPUT chain so the container is not left unreachable by a half-applied ACL.
            dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
        } catch (Exception ne) {
            log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
        }
    }
}
// Run all joined iptables commands in one shell so the container's network namespace is entered only once.
dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
/**
 * Applies the given ACL inside the container's network namespace, unless it is already active.
 * All iptables commands are joined into a single shell invocation so the namespace is entered
 * only once. On failure, rolls back by setting the INPUT policy to ACCEPT (best effort).
 */
private void applyAcl(ContainerName containerName, Acl acl) {
    if (isAclActive(containerName, acl)) {
        return; // nothing to do, this exact ACL is already in effect
    }
    final Command flush = new FlushCommand(Chain.INPUT);
    final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
    try {
        // Join with '&&' so the chain aborts on the first failing command. With ';' the shell
        // would report only the last command's exit status, silently ignoring earlier failures
        // and caching a partially applied ACL as active.
        String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
                .map(command -> command.asString(IPTABLES_COMMAND))
                .collect(Collectors.joining(" && "));

        log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
        dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
        containerAcls.put(containerName, acl);
    } catch (Exception e) {
        log.error("Exception occurred while configuring ACLs for " + containerName.asString()
                + ", attempting rollback", e);
        try {
            // Open the INPUT chain so the container is not left unreachable by a half-applied ACL.
            dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
        } catch (Exception ne) {
            log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
        }
    }
}
/**
 * Periodically fetches container ACL specs from the node repository and applies them,
 * via ip6tables, to every running container managed by this node admin.
 */
class AclMaintainer implements Runnable {

    private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
    private static final String IPTABLES_COMMAND = "ip6tables";

    private final DockerOperations dockerOperations;
    private final NodeRepository nodeRepository;
    private final String nodeAdminHostname;
    // Last ACL successfully applied per container, used to skip redundant reconfiguration.
    private final Map<ContainerName, Acl> containerAcls;

    public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                         String nodeAdminHostname) {
        this.dockerOperations = dockerOperations;
        this.nodeRepository = nodeRepository;
        this.nodeAdminHostname = nodeAdminHostname;
        this.containerAcls = new HashMap<>();
    }

    /** Returns whether the given ACL is already the one last applied to this container. */
    private boolean isAclActive(ContainerName containerName, Acl acl) {
        Acl current = containerAcls.get(containerName);
        return current != null && acl.equals(current);
    }

    /** Applies the ACL specs from the node repository to all running managed containers. */
    private synchronized void configureAcls() {
        Map<ContainerName, List<ContainerAclSpec>> specsByContainer = nodeRepository
                .getContainerAclSpecs(nodeAdminHostname).stream()
                .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));

        dockerOperations.getAllManagedContainers().stream()
                .filter(container -> container.state.isRunning())
                .filter(container -> specsByContainer.containsKey(container.name))
                .forEach(container -> applyAcl(container.name,
                                               new Acl(container.pid, specsByContainer.get(container.name))));
    }

    @Override
    public void run() {
        try {
            configureAcls();
        } catch (Throwable throwable) {
            // Runs on a scheduler thread; never let an exception kill the maintainer.
            log.error("Failed to configure ACLs", throwable);
        }
    }
}
/**
 * Periodically fetches container ACL specs from the node repository and applies them,
 * via ip6tables, to every running container managed by this node admin.
 */
class AclMaintainer implements Runnable {

    private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
    private static final String IPTABLES_COMMAND = "ip6tables";

    private final DockerOperations dockerOperations;
    private final NodeRepository nodeRepository;
    private final String nodeAdminHostname;
    // Last ACL successfully applied per container, used to skip redundant reconfiguration.
    private final Map<ContainerName, Acl> containerAcls;

    public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                         String nodeAdminHostname) {
        this.dockerOperations = dockerOperations;
        this.nodeRepository = nodeRepository;
        this.nodeAdminHostname = nodeAdminHostname;
        this.containerAcls = new HashMap<>();
    }

    /** Returns whether the given ACL is already the one last applied to this container. */
    private boolean isAclActive(ContainerName containerName, Acl acl) {
        Acl current = containerAcls.get(containerName);
        return current != null && acl.equals(current);
    }

    /** Applies the ACL specs from the node repository to all running managed containers. */
    private synchronized void configureAcls() {
        Map<ContainerName, List<ContainerAclSpec>> specsByContainer = nodeRepository
                .getContainerAclSpecs(nodeAdminHostname).stream()
                .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));

        dockerOperations.getAllManagedContainers().stream()
                .filter(container -> container.state.isRunning())
                .filter(container -> specsByContainer.containsKey(container.name))
                .forEach(container -> applyAcl(container.name,
                                               new Acl(container.pid, specsByContainer.get(container.name))));
    }

    @Override
    public void run() {
        try {
            configureAcls();
        } catch (Throwable throwable) {
            // Runs on a scheduler thread; never let an exception kill the maintainer.
            log.error("Failed to configure ACLs", throwable);
        }
    }
}
I see, let's try it then.
/**
 * Applies the given ACL inside the container's network namespace, unless it is already active.
 * All iptables commands are joined into a single shell invocation so the namespace is entered
 * only once. On failure, rolls back by setting the INPUT policy to ACCEPT (best effort).
 */
private void applyAcl(ContainerName containerName, Acl acl) {
    if (isAclActive(containerName, acl)) {
        return; // nothing to do, this exact ACL is already in effect
    }
    final Command flush = new FlushCommand(Chain.INPUT);
    final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
    try {
        // Join with '&&' so the chain aborts on the first failing command. With ';' the shell
        // would report only the last command's exit status, silently ignoring earlier failures
        // and caching a partially applied ACL as active.
        String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
                .map(command -> command.asString(IPTABLES_COMMAND))
                .collect(Collectors.joining(" && "));

        log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
        dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
        containerAcls.put(containerName, acl);
    } catch (Exception e) {
        log.error("Exception occurred while configuring ACLs for " + containerName.asString()
                + ", attempting rollback", e);
        try {
            // Open the INPUT chain so the container is not left unreachable by a half-applied ACL.
            dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
        } catch (Exception ne) {
            log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
        }
    }
}
// Run all joined iptables commands in one shell so the container's network namespace is entered only once.
dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
private void applyAcl(ContainerName containerName, Acl acl) { if (isAclActive(containerName, acl)) { return; } final Command flush = new FlushCommand(Chain.INPUT); final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT); try { String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream()) .map(command -> command.asString(IPTABLES_COMMAND)) .collect(Collectors.joining("; ")); log.debug("Running ACL command '" + commands + "' in " + containerName.asString()); dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands); containerAcls.put(containerName, acl); } catch (Exception e) { log.error("Exception occurred while configuring ACLs for " + containerName.asString() + ", attempting rollback", e); try { dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND)); } catch (Exception ne) { log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne); } } }
class AclMaintainer implements Runnable { private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class); private static final String IPTABLES_COMMAND = "ip6tables"; private final DockerOperations dockerOperations; private final NodeRepository nodeRepository; private final String nodeAdminHostname; private final Map<ContainerName, Acl> containerAcls; public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository, String nodeAdminHostname) { this.dockerOperations = dockerOperations; this.nodeRepository = nodeRepository; this.nodeAdminHostname = nodeAdminHostname; this.containerAcls = new HashMap<>(); } private boolean isAclActive(ContainerName containerName, Acl acl) { return Optional.ofNullable(containerAcls.get(containerName)) .map(acl::equals) .orElse(false); } private synchronized void configureAcls() { final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository .getContainerAclSpecs(nodeAdminHostname).stream() .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy)); dockerOperations .getAllManagedContainers().stream() .filter(container -> container.state.isRunning()) .map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name))) .filter(pair -> pair.getSecond() != null) .forEach(pair -> applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond()))); } @Override public void run() { try { configureAcls(); } catch (Throwable t) { log.error("Failed to configure ACLs", t); } } }
class AclMaintainer implements Runnable { private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class); private static final String IPTABLES_COMMAND = "ip6tables"; private final DockerOperations dockerOperations; private final NodeRepository nodeRepository; private final String nodeAdminHostname; private final Map<ContainerName, Acl> containerAcls; public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository, String nodeAdminHostname) { this.dockerOperations = dockerOperations; this.nodeRepository = nodeRepository; this.nodeAdminHostname = nodeAdminHostname; this.containerAcls = new HashMap<>(); } private boolean isAclActive(ContainerName containerName, Acl acl) { return Optional.ofNullable(containerAcls.get(containerName)) .map(acl::equals) .orElse(false); } private synchronized void configureAcls() { final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository .getContainerAclSpecs(nodeAdminHostname).stream() .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy)); dockerOperations .getAllManagedContainers().stream() .filter(container -> container.state.isRunning()) .map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name))) .filter(pair -> pair.getSecond() != null) .forEach(pair -> applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond()))); } @Override public void run() { try { configureAcls(); } catch (Throwable t) { log.error("Failed to configure ACLs", t); } } }
I think this will cause dead jobs to be retried multiple times, or by the wrong mechanism, same applies to L201. I suggest having an overloaded `isRunning()` where the default `jobTimeoutLimit` is `Instant.MIN` - effectively the same behaviour as `inProgress`.
private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; }
if (job.isRunning(jobTimeoutLimit())) return false;
private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
But we don't actually consider a job running if it has been dead for more than 12 hours ... I think you are right that several of these maintenance jobs can now come to resurrect dead jobs, but that seems harmless to me - just a stepping stone towards replacing them by one. They will not be retried multiple times because trigger check if they already run.
private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; }
if (job.isRunning(jobTimeoutLimit())) return false;
private boolean shouldRetryNow(JobStatus job) { if (job.isSuccess()) return false; if (job.isRunning(jobTimeoutLimit())) return false; Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10); if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true; if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true; return false; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Likely harmless in practice, yes, but it becomes even harder to reason about because the logged trigger cause may be wrong.
/**
 * Decides whether the periodic trigger should retry the given failing job now.
 *
 * Retries once the time since the job last completed exceeds one tenth of the time
 * the job has been failing, or exceeds 4 hours, whichever is shorter.
 *
 * NOTE(review): the Optional.get() calls below are unchecked; presumably a job that
 * is neither successful nor running always has firstFailing() and lastCompleted()
 * set -- verify against JobStatus.
 */
private boolean shouldRetryNow(JobStatus job) {
    if (job.isSuccess()) return false;                   // successful jobs are never retried
    if (job.isRunning(jobTimeoutLimit())) return false;  // don't retrigger a job that is still live
    // Backoff grows with the failing time: one tenth of (now - first failure).
    Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
    if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
    // Upper bound on the backoff: retry at least every 4 hours.
    if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;
    return false;
}
if (job.isRunning(jobTimeoutLimit())) return false;
/**
 * Tells whether a failing job is due for a retry at this instant.
 * The retry interval is one tenth of the time the job has spent failing,
 * but never longer than 4 hours.
 */
private boolean shouldRetryNow(JobStatus job) {
    if (job.isSuccess()) return false;
    if (job.isRunning(jobTimeoutLimit())) return false;
    long millisFailing = clock.millis() - job.firstFailing().get().at().toEpochMilli();
    Duration retryInterval = Duration.ofMillis(millisFailing / 10);
    if (job.lastCompleted().get().at().isBefore(clock.instant().minus(retryInterval)))
        return true;
    // Never back off for more than 4 hours, regardless of failing time.
    return job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)));
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Yes, but simplification is coming ...
/**
 * Decides whether a failing job is due for another retry attempt right now.
 *
 * <p>Retries back off as the failure ages: the wait between attempts is one tenth of the total
 * time the job has been failing, but never more than 4 hours between attempts.
 *
 * @param job the status of the job being considered for retry
 * @return true if the job should be retried now, false otherwise
 */
private boolean shouldRetryNow(JobStatus job) {
    if (job.isSuccess()) return false;                  // nothing to retry
    if (job.isRunning(jobTimeoutLimit())) return false; // an attempt is already in progress

    // Backoff interval: a tenth of the time elapsed since the job first started failing.
    // NOTE(review): assumes firstFailing()/lastCompleted() are present whenever the job is
    // neither successful nor running — confirm against JobStatus' invariants.
    Duration backoff = Duration.ofMillis((clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
    Instant lastCompletedAt = job.lastCompleted().get().at();
    if (lastCompletedAt.isBefore(clock.instant().minus(backoff))) return true;

    // Cap the wait: even for very old failures, retry at least every 4 hours.
    return lastCompletedAt.isBefore(clock.instant().minus(Duration.ofHours(4)));
}
if (job.isRunning(jobTimeoutLimit())) return false;
/**
 * Decides whether a failing job is due for another retry attempt right now.
 *
 * <p>Retries back off as the failure ages: the wait between attempts is one tenth of the total
 * time the job has been failing, but never more than 4 hours between attempts.
 *
 * @param job the status of the job being considered for retry
 * @return true if the job should be retried now, false otherwise
 */
private boolean shouldRetryNow(JobStatus job) {
    if (job.isSuccess()) return false;                  // nothing to retry
    if (job.isRunning(jobTimeoutLimit())) return false; // an attempt is already in progress

    // Backoff interval: a tenth of the time elapsed since the job first started failing.
    // NOTE(review): assumes firstFailing()/lastCompleted() are present whenever the job is
    // neither successful nor running — confirm against JobStatus' invariants.
    Duration backoff = Duration.ofMillis((clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
    Instant lastCompletedAt = job.lastCompleted().get().at();
    if (lastCompletedAt.isBefore(clock.instant().minus(backoff))) return true;

    // Cap the wait: even for very old failures, retry at least every 4 hours.
    return lastCompletedAt.isBefore(clock.instant().minus(Duration.ofHours(4)));
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final static Duration jobTimeout = Duration.ofHours(12); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { try (Lock lock = applications().lock(report.applicationId())) { Application application = applications().require(report.applicationId()); application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (order.isFirst(report.jobType())) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true), lock); return; } } else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, String.format("%s completed successfully in build %d", report.jobType(), report.buildNumber()), lock); else if (isCapacityConstrained(report.jobType()) && shouldRetryOnOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, String.format("Retrying due to out of capacity in build %d", report.buildNumber()), lock); else if (shouldRetryNow(application)) application = trigger(report.jobType(), application, false, String.format("Retrying as build %d just started failing", report.buildNumber()), lock); applications().store(application, lock); } } /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) { try (Lock lock = applications().lock(application.id())) { triggerReadyJobs(application, lock); } } } private void triggerReadyJobs(Application application, Lock lock) { if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { if (jobType.environment().equals(Environment.staging)) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus.isRunning(jobTimeoutLimit())) continue; for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(jobStatus, nextStatus)) trigger(nextJobType, application, false, "Triggering previously blocked job", lock); } } } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(JobStatus previous, JobStatus next) { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; JobStatus.JobRun previousSuccess = previous.lastSuccess().get(); JobStatus.JobRun nextSuccess = next.lastSuccess().get(); if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get())) return true; if (! previousSuccess.version().equals(nextSuccess.version())) return true; return false; } /** * Called periodically to cause triggering of jobs in the background */ public void triggerFailing(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if ( ! 
application.deploying().isPresent()) return; for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (isFailing(application.deploying().get(), jobStatus)) { if (shouldRetryNow(jobStatus)) { application = trigger(jobType, application, false, "Retrying failing job", lock); applications().store(application, lock); } break; } } Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs()); if (firstDeadJob.isPresent()) { application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job", lock); applications().store(application, lock); } } } /** Triggers jobs that have been delayed according to deployment spec */ public void triggerDelayed() { for (Application application : applications().asList()) { if ( ! application.deploying().isPresent() ) continue; if (application.deploymentJobs().hasFailures()) continue; if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue; if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) { continue; } Optional<JobStatus> lastSuccessfulJob = application.deploymentJobs().jobStatus().values() .stream() .filter(j -> j.lastSuccess().isPresent()) .sorted(Comparator.<JobStatus, Instant>comparing(j -> j.lastSuccess().get().at()).reversed()) .findFirst(); if ( ! 
lastSuccessfulJob.isPresent() ) continue; try (Lock lock = applications().lock(application.id())) { application = applications().require(application.id()); application = trigger(order.nextAfter(lastSuccessfulJob.get().type(), application), application, "Resuming delayed deployment", lock); applications().store(application, lock); } } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not upgrade " + application + ": A change is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, "Deploying change", lock); applications().store(application, lock); } } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { try (Lock lock = applications().lock(applicationId)) { Application application = applications().require(applicationId); buildSystem.removeJobs(application.id()); application = application.withDeploying(Optional.empty()); applications().store(application, lock); } } private ApplicationController applications() { return controller.applications(); } /** Returns whether a job is failing for the current change in the given application */ private boolean isFailing(Change change, JobStatus status) { return status != null && !status.isSuccess() && status.lastCompletedFor(change); } private boolean 
isCapacityConstrained(JobType jobType) { return jobType == JobType.stagingTest || jobType == JobType.systemTest; } /** Returns the first job that has been running for more than the given timeout */ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) { Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream() .filter(job -> job.isRunning(Instant.ofEpochMilli(0))) .sorted(Comparator.comparing(status -> status.lastTriggered().get().at())) .findFirst(); return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit())); } /** Decide whether the job should be triggered by the periodic trigger */ /** Retry immediately only if this just started failing. Otherwise retry periodically */ private boolean shouldRetryNow(Application application) { return application.deploymentJobs().failingSince().isAfter(clock.instant().minus(Duration.ofSeconds(10))); } /** Decide whether to retry due to capacity restrictions */ private boolean shouldRetryOnOutOfCapacity(Application application, JobType jobType) { Optional<JobError> outOfCapacityError = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType)) .flatMap(JobStatus::jobError) .filter(e -> e.equals(JobError.outOfCapacity)); if ( ! outOfCapacityError.isPresent()) return false; return application.deploymentJobs().jobStatus().get(jobType).firstFailing().get().at() .isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean deploysTo(Application application, JobType jobType) { Optional<Zone> zone = jobType.zone(controller.system()); if (zone.isPresent() && jobType.isProduction()) { if ( ! 
application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) { return false; } } return true; } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param cause describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) { if (jobType == null) { return application; } if (jobType.isProduction() && application.deployingBlocked(clock.instant())) { return application; } if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) { return application; } if (jobType != JobType.component && ! application.deploying().isPresent()) { log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " + "currently deploying a change", jobType, application, cause)); return application; } if ( ! deploysTo(application, jobType)) { return application; } if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, cause)); return application; } if ( ! 
application.deploymentJobs().projectId().isPresent()) { return application; } log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), cause)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), controller); } private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) { for (JobType job : jobs) application = trigger(job, application, false, cause, lock); return application; } private boolean acceptNewRevisionNow(Application application) { if ( ! application.deploying().isPresent()) return true; if ( application.deploying().get() instanceof Change.ApplicationChange) return true; if ( application.deploymentJobs().hasFailures()) return true; if ( application.isBlocked(clock.instant())) return true; return false; } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } }
Good point. Fixed and added tests for correct categorization
public static boolean isTransient(int code) { return code >= TRANSIENT_ERROR; }
return code >= TRANSIENT_ERROR;
public static boolean isTransient(int code) { return (code >= TRANSIENT_ERROR) && (code < FATAL_ERROR); }
class ErrorCode { /** The code is here for completeness. */ public static final int NONE = 0; /** A general transient error, resending is possible. */ public static final int TRANSIENT_ERROR = 100000; /** Sending was rejected because throttler capacity is full. */ public static final int SEND_QUEUE_FULL = TRANSIENT_ERROR + 1; /** No addresses found for the services of the message route. */ public static final int NO_ADDRESS_FOR_SERVICE = TRANSIENT_ERROR + 2; /** A connection problem occured while sending. */ public static final int CONNECTION_ERROR = TRANSIENT_ERROR + 3; /** The session specified for the message is unknown. */ public static final int UNKNOWN_SESSION = TRANSIENT_ERROR + 4; /** The recipient session is busy. */ public static final int SESSION_BUSY = TRANSIENT_ERROR + 5; /** Sending aborted by route verification. */ public static final int SEND_ABORTED = TRANSIENT_ERROR + 6; /** Version handshake failed for any reason. */ public static final int HANDSHAKE_FAILED = TRANSIENT_ERROR + 7; /** An application specific transient error. */ public static final int APP_TRANSIENT_ERROR = TRANSIENT_ERROR + 50000; /** A general non-recoverable error, resending is not possible. */ public static final int FATAL_ERROR = 200000; /** Sending was rejected because throttler is closed. */ public static final int SEND_QUEUE_CLOSED = FATAL_ERROR + 1; /** The route of the message is illegal. */ public static final int ILLEGAL_ROUTE = FATAL_ERROR + 2; /** No services found for the message route. */ public static final int NO_SERVICES_FOR_ROUTE = FATAL_ERROR + 3; /** An error occured while encoding the message. */ public static final int ENCODE_ERROR = FATAL_ERROR + 5; /** A fatal network error occured while sending. */ public static final int NETWORK_ERROR = FATAL_ERROR + 6; /** The protocol specified for the message is unknown. */ public static final int UNKNOWN_PROTOCOL = FATAL_ERROR + 7; /** An error occured while decoding the message. 
*/ public static final int DECODE_ERROR = FATAL_ERROR + 8; /** A timeout occured while sending. */ public static final int TIMEOUT = FATAL_ERROR + 9; /** The target is running an incompatible version. */ public static final int INCOMPATIBLE_VERSION = FATAL_ERROR + 10; /** The policy specified in a route is unknown. */ public static final int UNKNOWN_POLICY = FATAL_ERROR + 11; /** The network was shut down when attempting to send. */ public static final int NETWORK_SHUTDOWN = FATAL_ERROR + 12; /** Exception thrown by routing policy. */ public static final int POLICY_ERROR = FATAL_ERROR + 13; /** An error occured while sequencing a message. */ public static final int SEQUENCE_ERROR = FATAL_ERROR + 14; /** An application specific non-recoverable error. */ public static final int APP_FATAL_ERROR = FATAL_ERROR + 50000; /** No error codes are allowed to be this big. */ public static final int ERROR_LIMIT = APP_FATAL_ERROR + 50000; /** * Translates the given error code into its symbolic name. * * @param error The error code to translate. * @return The symbolic name. 
*/ public static String getName(int error) { switch (error) { case APP_FATAL_ERROR : return "APP_FATAL_ERROR"; case APP_TRANSIENT_ERROR : return "APP_TRANSIENT_ERROR"; case CONNECTION_ERROR : return "CONNECTION_ERROR"; case DECODE_ERROR : return "DECODE_ERROR"; case ENCODE_ERROR : return "ENCODE_ERROR"; case FATAL_ERROR : return "FATAL_ERROR"; case HANDSHAKE_FAILED : return "HANDSHAKE_FAILED"; case ILLEGAL_ROUTE : return "ILLEGAL_ROUTE"; case INCOMPATIBLE_VERSION : return "INCOMPATIBLE_VERSION"; case NETWORK_ERROR : return "NETWORK_ERROR"; case NETWORK_SHUTDOWN : return "NETWORK_SHUTDOWN"; case NO_ADDRESS_FOR_SERVICE : return "NO_ADDRESS_FOR_SERVICE"; case NO_SERVICES_FOR_ROUTE : return "NO_SERVICES_FOR_ROUTE"; case NONE : return "NONE"; case POLICY_ERROR : return "POLICY_ERROR"; case SEND_ABORTED : return "SEND_ABORTED"; case SEND_QUEUE_CLOSED : return "SEND_QUEUE_CLOSED"; case SEND_QUEUE_FULL : return "SEND_QUEUE_FULL"; case SEQUENCE_ERROR : return "SEQUENCE_ERROR"; case SESSION_BUSY : return "SESSION_BUSY"; case TIMEOUT : return "TIMEOUT"; case TRANSIENT_ERROR : return "TRANSIENT_ERROR"; case UNKNOWN_POLICY : return "UNKNOWN_POLICY"; case UNKNOWN_PROTOCOL : return "UNKNOWN_PROTOCOL"; case UNKNOWN_SESSION : return "UNKNOWN_SESSION"; default : return "UNKNOWN(" + error + ")"; } } public static boolean isFatal(int code) { return code >= FATAL_ERROR; } public static boolean isMBusError(int code) { return ((code < APP_TRANSIENT_ERROR) && isTransient(code)) || ((code < APP_FATAL_ERROR) && isFatal(code)); } }
class ErrorCode { /** The code is here for completeness. */ public static final int NONE = 0; /** A general transient error, resending is possible. */ public static final int TRANSIENT_ERROR = 100000; /** Sending was rejected because throttler capacity is full. */ public static final int SEND_QUEUE_FULL = TRANSIENT_ERROR + 1; /** No addresses found for the services of the message route. */ public static final int NO_ADDRESS_FOR_SERVICE = TRANSIENT_ERROR + 2; /** A connection problem occured while sending. */ public static final int CONNECTION_ERROR = TRANSIENT_ERROR + 3; /** The session specified for the message is unknown. */ public static final int UNKNOWN_SESSION = TRANSIENT_ERROR + 4; /** The recipient session is busy. */ public static final int SESSION_BUSY = TRANSIENT_ERROR + 5; /** Sending aborted by route verification. */ public static final int SEND_ABORTED = TRANSIENT_ERROR + 6; /** Version handshake failed for any reason. */ public static final int HANDSHAKE_FAILED = TRANSIENT_ERROR + 7; /** An application specific transient error. */ public static final int APP_TRANSIENT_ERROR = TRANSIENT_ERROR + 50000; /** A general non-recoverable error, resending is not possible. */ public static final int FATAL_ERROR = 200000; /** Sending was rejected because throttler is closed. */ public static final int SEND_QUEUE_CLOSED = FATAL_ERROR + 1; /** The route of the message is illegal. */ public static final int ILLEGAL_ROUTE = FATAL_ERROR + 2; /** No services found for the message route. */ public static final int NO_SERVICES_FOR_ROUTE = FATAL_ERROR + 3; /** An error occured while encoding the message. */ public static final int ENCODE_ERROR = FATAL_ERROR + 5; /** A fatal network error occured while sending. */ public static final int NETWORK_ERROR = FATAL_ERROR + 6; /** The protocol specified for the message is unknown. */ public static final int UNKNOWN_PROTOCOL = FATAL_ERROR + 7; /** An error occured while decoding the message. 
*/ public static final int DECODE_ERROR = FATAL_ERROR + 8; /** A timeout occured while sending. */ public static final int TIMEOUT = FATAL_ERROR + 9; /** The target is running an incompatible version. */ public static final int INCOMPATIBLE_VERSION = FATAL_ERROR + 10; /** The policy specified in a route is unknown. */ public static final int UNKNOWN_POLICY = FATAL_ERROR + 11; /** The network was shut down when attempting to send. */ public static final int NETWORK_SHUTDOWN = FATAL_ERROR + 12; /** Exception thrown by routing policy. */ public static final int POLICY_ERROR = FATAL_ERROR + 13; /** An error occured while sequencing a message. */ public static final int SEQUENCE_ERROR = FATAL_ERROR + 14; /** An application specific non-recoverable error. */ public static final int APP_FATAL_ERROR = FATAL_ERROR + 50000; /** No error codes are allowed to be this big. */ public static final int ERROR_LIMIT = APP_FATAL_ERROR + 50000; /** * Translates the given error code into its symbolic name. * * @param error The error code to translate. * @return The symbolic name. 
*/ public static String getName(int error) { switch (error) { case APP_FATAL_ERROR : return "APP_FATAL_ERROR"; case APP_TRANSIENT_ERROR : return "APP_TRANSIENT_ERROR"; case CONNECTION_ERROR : return "CONNECTION_ERROR"; case DECODE_ERROR : return "DECODE_ERROR"; case ENCODE_ERROR : return "ENCODE_ERROR"; case FATAL_ERROR : return "FATAL_ERROR"; case HANDSHAKE_FAILED : return "HANDSHAKE_FAILED"; case ILLEGAL_ROUTE : return "ILLEGAL_ROUTE"; case INCOMPATIBLE_VERSION : return "INCOMPATIBLE_VERSION"; case NETWORK_ERROR : return "NETWORK_ERROR"; case NETWORK_SHUTDOWN : return "NETWORK_SHUTDOWN"; case NO_ADDRESS_FOR_SERVICE : return "NO_ADDRESS_FOR_SERVICE"; case NO_SERVICES_FOR_ROUTE : return "NO_SERVICES_FOR_ROUTE"; case NONE : return "NONE"; case POLICY_ERROR : return "POLICY_ERROR"; case SEND_ABORTED : return "SEND_ABORTED"; case SEND_QUEUE_CLOSED : return "SEND_QUEUE_CLOSED"; case SEND_QUEUE_FULL : return "SEND_QUEUE_FULL"; case SEQUENCE_ERROR : return "SEQUENCE_ERROR"; case SESSION_BUSY : return "SESSION_BUSY"; case TIMEOUT : return "TIMEOUT"; case TRANSIENT_ERROR : return "TRANSIENT_ERROR"; case UNKNOWN_POLICY : return "UNKNOWN_POLICY"; case UNKNOWN_PROTOCOL : return "UNKNOWN_PROTOCOL"; case UNKNOWN_SESSION : return "UNKNOWN_SESSION"; default : return "UNKNOWN(" + error + ")"; } } public static boolean isFatal(int code) { return code >= FATAL_ERROR; } public static boolean isMBusError(int code) { return ((code < APP_TRANSIENT_ERROR) && isTransient(code)) || ((code < APP_FATAL_ERROR) && isFatal(code)) || ((code < TRANSIENT_ERROR) && (code >= NONE)); } }
This should be `clusterInfoCpuField` ? Same for mem and disk
private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoTypeField).asDouble(); double flavorMem = inspector.field(clusterInfoTypeField).asDouble(); double flavorDisk = inspector.field(clusterInfoTypeField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); }
double flavorCpu = inspector.field(clusterInfoTypeField).asDouble();
private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); }
class ApplicationSerializer { private final String idField = "id"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String outstandingChangeField = "outstandingChangeField"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String applicationPackageHashField = "applicationPackageHash"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String jiraIssueIdField = "jiraIssueId"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String versionField = "version"; private final String revisionField = "revision"; private final String atField = "at"; private final String upgradeField = "upgrade"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField 
= "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.deploying(), root); root.setBool(outstandingChangeField, application.hasOutstandingChange()); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.revision(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, 
Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneToSlime(Zone zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationRevision applicationRevision, Cursor object) { object.setString(applicationPackageHashField, applicationRevision.id()); if (applicationRevision.source().isPresent()) toSlime(applicationRevision.source().get(), object.setObject(sourceRevisionField)); } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId() .filter(id -> id > 0) .ifPresent(projectId -> 
cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.jiraIssueId().ifPresent(jiraIssueId -> cursor.setString(jiraIssueIdField, jiraIssueId)); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().id()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobRunToSlime(jobStatus.lastTriggered(), object, lastTriggeredField); jobRunToSlime(jobStatus.lastCompleted(), object, lastCompletedField); jobRunToSlime(jobStatus.firstFailing(), object, firstFailingField); jobRunToSlime(jobStatus.lastSuccess(), object, lastSuccessField); } private void jobRunToSlime(Optional<JobStatus.JobRun> jobRun, Cursor parent, String jobRunObjectName) { if ( ! jobRun.isPresent()) return; Cursor object = parent.setObject(jobRunObjectName); object.setString(versionField, jobRun.get().version().toString()); if ( jobRun.get().revision().isPresent()) toSlime(jobRun.get().revision().get(), object.setObject(revisionField)); object.setBool(upgradeField, jobRun.get().upgrade()); object.setLong(atField, jobRun.get().at().toEpochMilli()); } private void toSlime(Optional<Change> deploying, Cursor parentObject) { if ( ! 
deploying.isPresent()) return; Cursor object = parentObject.setObject(deployingField); if (deploying.get() instanceof Change.VersionChange) object.setString(versionField, ((Change.VersionChange)deploying.get()).version().toString()); else if (((Change.ApplicationChange)deploying.get()).revision().isPresent()) toSlime(((Change.ApplicationChange)deploying.get()).revision().get(), object); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString()); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Optional<Change> deploying = changeFromSlime(root.field(deployingField)); boolean outstandingChange = root.field(outstandingChangeField).asBool(); return new Application(id, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneFromSlime(deploymentObject.field(zoneField)), applicationRevisionFromSlime(deploymentObject.field(applicationPackageRevisionField)).get(), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField))); } private Map<ClusterSpec.Id, 
ClusterInfo> clusterInfoMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector obect) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(obect))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private Zone zoneFromSlime(Inspector object) { return new Zone(Environment.from(object.field(environmentField).asString()), RegionName.from(object.field(regionField).asString())); } private Optional<ApplicationRevision> applicationRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); String applicationPackageHash = object.field(applicationPackageHashField).asString(); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); return sourceRevision.isPresent() ? Optional.of(ApplicationRevision.from(applicationPackageHash, sourceRevision.get())) : Optional.of(ApplicationRevision.from(applicationPackageHash)); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { Optional<Long> projectId = optionalLong(object.field(projectIdField)) .filter(id -> id > 0); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<String> jiraIssueKey = optionalString(object.field(jiraIssueIdField)); return new DeploymentJobs(projectId, jobStatusList, jiraIssueKey); } private Optional<Change> changeFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); Inspector versionFieldValue = object.field(versionField); if (versionFieldValue.valid()) return Optional.of(new Change.VersionChange(Version.fromString(versionFieldValue.asString()))); else if (object.field(applicationPackageHashField).valid()) return Optional.of(Change.ApplicationChange.of(applicationRevisionFromSlime(object).get())); else return Optional.of(Change.ApplicationChange.unknown()); } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusList.add(jobStatusFromSlime(item))); return jobStatusList; } private JobStatus jobStatusFromSlime(Inspector object) { DeploymentJobs.JobType jobType = DeploymentJobs.JobType.fromId(object.field(jobTypeField).asString()); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return new JobStatus(jobType, jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(new Version(object.field(versionField).asString()), applicationRevisionFromSlime(object.field(revisionField)), object.field(upgradeField).asBool(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private Optional<Long> optionalLong(Inspector field) { return field.valid() ? Optional.of(field.asLong()) : Optional.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } }
class ApplicationSerializer { private final String idField = "id"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String outstandingChangeField = "outstandingChangeField"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String applicationPackageHashField = "applicationPackageHash"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String jiraIssueIdField = "jiraIssueId"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String versionField = "version"; private final String revisionField = "revision"; private final String atField = "at"; private final String upgradeField = "upgrade"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField 
= "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.deploying(), root); root.setBool(outstandingChangeField, application.hasOutstandingChange()); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.revision(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, 
Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneToSlime(Zone zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationRevision applicationRevision, Cursor object) { object.setString(applicationPackageHashField, applicationRevision.id()); if (applicationRevision.source().isPresent()) toSlime(applicationRevision.source().get(), object.setObject(sourceRevisionField)); } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId() .filter(id -> id > 0) .ifPresent(projectId -> 
cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.jiraIssueId().ifPresent(jiraIssueId -> cursor.setString(jiraIssueIdField, jiraIssueId)); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().id()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobRunToSlime(jobStatus.lastTriggered(), object, lastTriggeredField); jobRunToSlime(jobStatus.lastCompleted(), object, lastCompletedField); jobRunToSlime(jobStatus.firstFailing(), object, firstFailingField); jobRunToSlime(jobStatus.lastSuccess(), object, lastSuccessField); } private void jobRunToSlime(Optional<JobStatus.JobRun> jobRun, Cursor parent, String jobRunObjectName) { if ( ! jobRun.isPresent()) return; Cursor object = parent.setObject(jobRunObjectName); object.setString(versionField, jobRun.get().version().toString()); if ( jobRun.get().revision().isPresent()) toSlime(jobRun.get().revision().get(), object.setObject(revisionField)); object.setBool(upgradeField, jobRun.get().upgrade()); object.setLong(atField, jobRun.get().at().toEpochMilli()); } private void toSlime(Optional<Change> deploying, Cursor parentObject) { if ( ! 
deploying.isPresent()) return; Cursor object = parentObject.setObject(deployingField); if (deploying.get() instanceof Change.VersionChange) object.setString(versionField, ((Change.VersionChange)deploying.get()).version().toString()); else if (((Change.ApplicationChange)deploying.get()).revision().isPresent()) toSlime(((Change.ApplicationChange)deploying.get()).revision().get(), object); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString()); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Optional<Change> deploying = changeFromSlime(root.field(deployingField)); boolean outstandingChange = root.field(outstandingChangeField).asBool(); return new Application(id, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneFromSlime(deploymentObject.field(zoneField)), applicationRevisionFromSlime(deploymentObject.field(applicationPackageRevisionField)).get(), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField))); } private Map<ClusterSpec.Id, 
ClusterInfo> clusterInfoMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector obect) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(obect))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private Zone zoneFromSlime(Inspector object) { return new Zone(Environment.from(object.field(environmentField).asString()), RegionName.from(object.field(regionField).asString())); } private Optional<ApplicationRevision> applicationRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); String applicationPackageHash = object.field(applicationPackageHashField).asString(); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); return sourceRevision.isPresent() ? Optional.of(ApplicationRevision.from(applicationPackageHash, sourceRevision.get())) : Optional.of(ApplicationRevision.from(applicationPackageHash)); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { Optional<Long> projectId = optionalLong(object.field(projectIdField)) .filter(id -> id > 0); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<String> jiraIssueKey = optionalString(object.field(jiraIssueIdField)); return new DeploymentJobs(projectId, jobStatusList, jiraIssueKey); } private Optional<Change> changeFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); Inspector versionFieldValue = object.field(versionField); if (versionFieldValue.valid()) return Optional.of(new Change.VersionChange(Version.fromString(versionFieldValue.asString()))); else if (object.field(applicationPackageHashField).valid()) return Optional.of(Change.ApplicationChange.of(applicationRevisionFromSlime(object).get())); else return Optional.of(Change.ApplicationChange.unknown()); } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusList.add(jobStatusFromSlime(item))); return jobStatusList; } private JobStatus jobStatusFromSlime(Inspector object) { DeploymentJobs.JobType jobType = DeploymentJobs.JobType.fromId(object.field(jobTypeField).asString()); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return new JobStatus(jobType, jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(new Version(object.field(versionField).asString()), applicationRevisionFromSlime(object.field(revisionField)), object.field(upgradeField).asBool(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private Optional<Long> optionalLong(Inspector field) { return field.valid() ? Optional.of(field.asLong()) : Optional.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } }
Ok, done.
/**
 * Optimizes the children of the given composite item against the single restrict parameter,
 * removing children that cannot match and pruning children made redundant by a child that
 * matches everything.
 *
 * @param item the composite whose children are optimized in place
 * @param restrictParam the single restricted document type name
 * @return RECALLS_EVERYTHING if this composite is known to recall all documents of the
 *         restricted type, RECALLS_NOTHING if it can recall none, UNKNOWN_RECALL otherwise
 * @throws UnsupportedOperationException for composite types this optimization does not know
 */
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
    Recall recall = Recall.UNKNOWN_RECALL;
    // Iterate backwards so removing a child by index does not shift unvisited indexes
    for (int i = item.getItemCount(); --i >= 0; ) {
        switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
            case RECALLS_EVERYTHING:
                if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                    // Fix: one child recalling everything makes the OTHER non-ranked children
                    // of an OR/EQUIV redundant for recall, so drop them. Ranked children are
                    // kept because they still contribute to ranking.
                    removeOtherNonrankedChildren(item, i);
                    recall = Recall.RECALLS_EVERYTHING;
                } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                    // An always-true conjunct is a no-op in AND/NEAR; remove it
                    item.removeItem(i);
                } else if (item instanceof RankItem) {
                    // Keep: children of RANK only influence ranking, not recall
                } else {
                    throw new UnsupportedOperationException(item.getClass().getName());
                }
                break;
            case RECALLS_NOTHING:
                if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                    // An always-false disjunct is a no-op in OR/EQUIV; remove it
                    item.removeItem(i);
                } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                    // An always-false conjunct makes the whole AND/NEAR recall nothing
                    return Recall.RECALLS_NOTHING;
                } else if (item instanceof RankItem) {
                    item.removeItem(i);
                } else {
                    throw new UnsupportedOperationException(item.getClass().getName());
                }
                break;
        }
    }
    return recall;
}

/** Removes every child of parent except the child at indexOfChildToKeep and any ranked children. */
private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
    Item childToKeep = parent.getItem(indexOfChildToKeep);
    for (int i = parent.getItemCount(); --i >= 0; ) {
        Item child = parent.getItem(i);
        if (child != childToKeep && ! child.isRanked())
            parent.removeItem(i);
    }
}
recall = Recall.RECALLS_EVERYTHING;
/**
 * Optimizes the children of the given composite item against the single restrict parameter,
 * removing children that cannot match and pruning children made redundant by a child that
 * matches everything.
 *
 * @param item the composite whose children are optimized in place
 * @param restrictParam the single restricted document type name
 * @return RECALLS_EVERYTHING if this composite is known to recall all documents of the
 *         restricted type, RECALLS_NOTHING if it can recall none, UNKNOWN_RECALL otherwise
 * @throws UnsupportedOperationException for composite types this optimization does not know
 */
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) {
    Recall recall = Recall.UNKNOWN_RECALL;
    // Iterate backwards so removing a child by index does not shift unvisited indexes
    for (int i = item.getItemCount(); --i >= 0; ) {
        switch (optimizeByRestrict(item.getItem(i), restrictParam)) {
            case RECALLS_EVERYTHING:
                if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                    // One child recalling everything makes the other non-ranked
                    // children of an OR/EQUIV redundant for recall
                    removeOtherNonrankedChildren(item, i);
                    recall = Recall.RECALLS_EVERYTHING;
                } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                    // An always-true conjunct is a no-op in AND/NEAR; remove it
                    item.removeItem(i);
                } else if (item instanceof RankItem) {
                    // Keep: children of RANK only influence ranking, not recall
                } else {
                    throw new UnsupportedOperationException(item.getClass().getName());
                }
                break;
            case RECALLS_NOTHING:
                if ((item instanceof OrItem) || (item instanceof EquivItem)) {
                    // An always-false disjunct is a no-op in OR/EQUIV; remove it
                    item.removeItem(i);
                } else if ((item instanceof AndItem) || (item instanceof NearItem)) {
                    // An always-false conjunct makes the whole AND/NEAR recall nothing
                    return Recall.RECALLS_NOTHING;
                } else if (item instanceof RankItem) {
                    item.removeItem(i);
                } else {
                    throw new UnsupportedOperationException(item.getClass().getName());
                }
                break;
        }
    }
    return recall;
}
/**
 * Static query-tree rewrites applied before dispatch: AND-NOT collapsing, restrict-based
 * pruning, single-child composite collapsing, and sddocname rewriting.
 */
class QueryRewrite {

    // Tri-state result of restrict-based analysis of a subtree
    private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }

    /**
     * Optimize multiple NotItems under and or by collapsing them in to one and leaving
     * the positive ones behind in its place and moving itself with the original and as its positive item
     * and the union of all the negative items of all the original NotItems as its negative items.
     */
    public static void optimizeAndNot(Query query) {
        Item root = query.getModel().getQueryTree().getRoot();
        Item possibleNewRoot = optimizeAndNot(root);
        // Only touch the tree if the rewrite actually produced a new root
        if (root != possibleNewRoot) {
            query.getModel().getQueryTree().setRoot(possibleNewRoot);
        }
    }

    /**
     * Optimizes the given query tree using the query's restrict parameter: if the whole
     * tree is proven to recall nothing for the restricted type, the root is replaced by
     * a NullItem. Only applies when exactly one restrict type is given.
     */
    public static void optimizeByRestrict(Query query) {
        if (query.getModel().getRestrict().size() != 1) {
            return;
        }
        Item root = query.getModel().getQueryTree().getRoot();
        if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
            query.getModel().getQueryTree().setRoot(new NullItem());
        }
    }

    /**
     * Collapses all single-child {@link CompositeItem}s into their parent item.
     */
    public static void collapseSingleComposites(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = collapseSingleComposites(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    /**
     * Replaces each {@link SimpleIndexedItem} searching the sddocname field with an item
     * appropriate for the search node.
     */
    public static void rewriteSddocname(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = rewriteSddocname(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    // Recursive worker for optimizeAndNot(Query): only composites can contain NotItems
    private static Item optimizeAndNot(Item node) {
        if (node instanceof CompositeItem) {
            return extractAndNotRecursively((CompositeItem) node);
        }
        return node;
    }

    // Depth-first: rewrite children first, then collapse NOTs directly under an AND
    private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            Item possibleNewChild = optimizeAndNot(child);
            if (child != possibleNewChild) {
                parent.setItem(i, possibleNewChild);
            }
        }
        if (parent instanceof AndItem) {
            return extractAndNot((AndItem) parent);
        }
        return parent;
    }

    // Merges all NotItem children of the AND into a single NotItem whose positive item is
    // the AND itself (each NOT's positive part stays behind in its old position) and whose
    // negative items are the union of all the original NOTs' negative items.
    private static CompositeItem extractAndNot(AndItem parent) {
        NotItem theOnlyNot = null;
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            if (child instanceof NotItem) {
                NotItem thisNot = (NotItem) child;
                // Replace the NOT by its positive part inside the AND
                parent.setItem(i, thisNot.getPositiveItem());
                if (theOnlyNot == null) {
                    // First NOT found becomes the surviving NOT, wrapping the whole AND
                    theOnlyNot = thisNot;
                    theOnlyNot.setPositiveItem(parent);
                } else {
                    // Subsequent NOTs contribute their negative items (index 0 is the positive)
                    for (int j=1; j < thisNot.getItemCount(); j++) {
                        theOnlyNot.addNegativeItem(thisNot.getItem(j));
                    }
                }
            }
        }
        return (theOnlyNot != null) ? theOnlyNot : parent;
    }

    // Dispatch on item type; unknown item types yield UNKNOWN_RECALL (no pruning)
    private static Recall optimizeByRestrict(Item item, String restrictParam) {
        if (item instanceof SimpleIndexedItem) {
            return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
        } else if (item instanceof NotItem) {
            return optimizeNotItemByRestrict((NotItem)item, restrictParam);
        } else if (item instanceof CompositeItem) {
            return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
        } else {
            return Recall.UNKNOWN_RECALL;
        }
    }

    // Only sddocname terms are decidable: they match all or none of a restricted type
    private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
        if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
            return Recall.UNKNOWN_RECALL;
        }
        return restrictParam.equals(item.getIndexedString())
                ? Recall.RECALLS_EVERYTHING
                : Recall.RECALLS_NOTHING;
    }

    // Item 0 of a NotItem is the positive part; items >= 1 are negatives
    private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
        if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
            // Positive part recalls nothing => whole NOT recalls nothing
            return Recall.RECALLS_NOTHING;
        }
        // Iterate backwards so removal by index is safe; stop before the positive item
        for (int i = item.getItemCount(); --i >= 1; ) {
            Item child = item.getItem(i);
            switch (optimizeByRestrict(child, restrictParam)) {
            case RECALLS_EVERYTHING:
                // A negative that matches everything cancels all recall
                return Recall.RECALLS_NOTHING;
            case RECALLS_NOTHING:
                // A negative that matches nothing is a no-op; drop it
                item.removeItem(i);
                break;
            }
        }
        return Recall.UNKNOWN_RECALL;
    }

    // Recursively collapses composites with exactly one child into that child
    private static Item collapseSingleComposites(Item item) {
        if (!(item instanceof CompositeItem)) {
            return item;
        }
        CompositeItem parent = (CompositeItem)item;
        int numChildren = parent.getItemCount();
        for (int i = 0; i < numChildren; ++i) {
            Item oldChild = parent.getItem(i);
            Item newChild = collapseSingleComposites(oldChild);
            if (oldChild != newChild) {
                parent.setItem(i, newChild);
            }
        }
        return numChildren == 1 ? parent.getItem(0) : item;
    }

    // Replaces sddocname terms with a SubstringItem over the document metastore index
    private static Item rewriteSddocname(Item item) {
        if (item instanceof CompositeItem) {
            CompositeItem parent = (CompositeItem)item;
            for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
                Item oldChild = parent.getItem(i);
                Item newChild = rewriteSddocname(oldChild);
                if (oldChild != newChild) {
                    parent.setItem(i, newChild);
                }
            }
        } else if (item instanceof SimpleIndexedItem) {
            SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
            if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
                SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
                newItem.setIndexName("[documentmetastore]");
                return newItem;
            }
        }
        return item;
    }
}
/**
 * Static query-tree rewrites applied before dispatch: AND-NOT collapsing, restrict-based
 * pruning, single-child composite collapsing, and sddocname rewriting.
 */
class QueryRewrite {

    // Tri-state result of restrict-based analysis of a subtree
    private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL }

    /**
     * Optimize multiple NotItems under and or by collapsing them in to one and leaving
     * the positive ones behind in its place and moving itself with the original and as its positive item
     * and the union of all the negative items of all the original NotItems as its negative items.
     */
    public static void optimizeAndNot(Query query) {
        Item root = query.getModel().getQueryTree().getRoot();
        Item possibleNewRoot = optimizeAndNot(root);
        // Only touch the tree if the rewrite actually produced a new root
        if (root != possibleNewRoot) {
            query.getModel().getQueryTree().setRoot(possibleNewRoot);
        }
    }

    /**
     * Optimizes the given query tree using the query's restrict parameter: if the whole
     * tree is proven to recall nothing for the restricted type, the root is replaced by
     * a NullItem. Only applies when exactly one restrict type is given.
     */
    public static void optimizeByRestrict(Query query) {
        if (query.getModel().getRestrict().size() != 1) {
            return;
        }
        Item root = query.getModel().getQueryTree().getRoot();
        if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) {
            query.getModel().getQueryTree().setRoot(new NullItem());
        }
    }

    /**
     * Collapses all single-child {@link CompositeItem}s into their parent item.
     */
    public static void collapseSingleComposites(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = collapseSingleComposites(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    /**
     * Replaces each {@link SimpleIndexedItem} searching the sddocname field with an item
     * appropriate for the search node.
     */
    public static void rewriteSddocname(Query query) {
        Item oldRoot = query.getModel().getQueryTree().getRoot();
        Item newRoot = rewriteSddocname(oldRoot);
        if (oldRoot != newRoot) {
            query.getModel().getQueryTree().setRoot(newRoot);
        }
    }

    // Recursive worker for optimizeAndNot(Query): only composites can contain NotItems
    private static Item optimizeAndNot(Item node) {
        if (node instanceof CompositeItem) {
            return extractAndNotRecursively((CompositeItem) node);
        }
        return node;
    }

    // Depth-first: rewrite children first, then collapse NOTs directly under an AND
    private static CompositeItem extractAndNotRecursively(CompositeItem parent) {
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            Item possibleNewChild = optimizeAndNot(child);
            if (child != possibleNewChild) {
                parent.setItem(i, possibleNewChild);
            }
        }
        if (parent instanceof AndItem) {
            return extractAndNot((AndItem) parent);
        }
        return parent;
    }

    // Merges all NotItem children of the AND into a single NotItem whose positive item is
    // the AND itself (each NOT's positive part stays behind in its old position) and whose
    // negative items are the union of all the original NOTs' negative items.
    private static CompositeItem extractAndNot(AndItem parent) {
        NotItem theOnlyNot = null;
        for (int i = 0; i < parent.getItemCount(); i++) {
            Item child = parent.getItem(i);
            if (child instanceof NotItem) {
                NotItem thisNot = (NotItem) child;
                // Replace the NOT by its positive part inside the AND
                parent.setItem(i, thisNot.getPositiveItem());
                if (theOnlyNot == null) {
                    // First NOT found becomes the surviving NOT, wrapping the whole AND
                    theOnlyNot = thisNot;
                    theOnlyNot.setPositiveItem(parent);
                } else {
                    // Subsequent NOTs contribute their negative items (index 0 is the positive)
                    for (int j=1; j < thisNot.getItemCount(); j++) {
                        theOnlyNot.addNegativeItem(thisNot.getItem(j));
                    }
                }
            }
        }
        return (theOnlyNot != null) ? theOnlyNot : parent;
    }

    // Dispatch on item type; unknown item types yield UNKNOWN_RECALL (no pruning)
    private static Recall optimizeByRestrict(Item item, String restrictParam) {
        if (item instanceof SimpleIndexedItem) {
            return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam);
        } else if (item instanceof NotItem) {
            return optimizeNotItemByRestrict((NotItem)item, restrictParam);
        } else if (item instanceof CompositeItem) {
            return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam);
        } else {
            return Recall.UNKNOWN_RECALL;
        }
    }

    // Only sddocname terms are decidable: they match all or none of a restricted type
    private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) {
        if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) {
            return Recall.UNKNOWN_RECALL;
        }
        return restrictParam.equals(item.getIndexedString())
                ? Recall.RECALLS_EVERYTHING
                : Recall.RECALLS_NOTHING;
    }

    // Item 0 of a NotItem is the positive part; items >= 1 are negatives
    private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) {
        if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) {
            // Positive part recalls nothing => whole NOT recalls nothing
            return Recall.RECALLS_NOTHING;
        }
        // Iterate backwards so removal by index is safe; stop before the positive item
        for (int i = item.getItemCount(); --i >= 1; ) {
            Item child = item.getItem(i);
            switch (optimizeByRestrict(child, restrictParam)) {
            case RECALLS_EVERYTHING:
                // A negative that matches everything cancels all recall
                return Recall.RECALLS_NOTHING;
            case RECALLS_NOTHING:
                // A negative that matches nothing is a no-op; drop it
                item.removeItem(i);
                break;
            }
        }
        return Recall.UNKNOWN_RECALL;
    }

    // Removes every child except the one to keep and any ranked children (which still
    // contribute to ranking even when redundant for recall)
    private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) {
        Item childToKeep = parent.getItem(indexOfChildToKeep);
        for (int i = parent.getItemCount(); --i >= 0; ) {
            Item child = parent.getItem(i);
            if ( child != childToKeep && ! parent.getItem(i).isRanked())
                parent.removeItem(i);
        }
    }

    // Recursively collapses composites with exactly one child into that child
    private static Item collapseSingleComposites(Item item) {
        if (!(item instanceof CompositeItem)) {
            return item;
        }
        CompositeItem parent = (CompositeItem)item;
        int numChildren = parent.getItemCount();
        for (int i = 0; i < numChildren; ++i) {
            Item oldChild = parent.getItem(i);
            Item newChild = collapseSingleComposites(oldChild);
            if (oldChild != newChild) {
                parent.setItem(i, newChild);
            }
        }
        return numChildren == 1 ? parent.getItem(0) : item;
    }

    // Replaces sddocname terms with a SubstringItem over the document metastore index
    private static Item rewriteSddocname(Item item) {
        if (item instanceof CompositeItem) {
            CompositeItem parent = (CompositeItem)item;
            for (int i = 0, len = parent.getItemCount(); i < len; ++i) {
                Item oldChild = parent.getItem(i);
                Item newChild = rewriteSddocname(oldChild);
                if (oldChild != newChild) {
                    parent.setItem(i, newChild);
                }
            }
        } else if (item instanceof SimpleIndexedItem) {
            SimpleIndexedItem oldItem = (SimpleIndexedItem)item;
            if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) {
                SubstringItem newItem = new SubstringItem(oldItem.getIndexedString());
                newItem.setIndexName("[documentmetastore]");
                return newItem;
            }
        }
        return item;
    }
}
Great catch! I added a unit test for this part and fixed the field names.
/**
 * Deserializes a ClusterInfo from its Slime representation.
 *
 * @param inspector the Slime object holding one cluster's info fields
 * @return the reconstructed ClusterInfo
 */
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
    String flavor = inspector.field(clusterInfoFlavorField).asString();
    int cost = (int)inspector.field(clusterInfoCostField).asLong();
    String type = inspector.field(clusterInfoTypeField).asString();
    // Fix: cpu/mem/disk were all read from clusterInfoTypeField (a copy-paste error),
    // yielding 0.0 for every flavor dimension; read the dedicated fields instead,
    // matching what toSlime(ClusterInfo, Cursor) writes
    double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
    double flavorMem = inspector.field(clusterInfoMemField).asDouble();
    double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
    List<String> hostnames = new ArrayList<>();
    inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
    return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
}
double flavorCpu = inspector.field(clusterInfoTypeField).asDouble();
/**
 * Deserializes a ClusterInfo from its Slime representation, reading the flavor
 * dimensions from the dedicated cpu/mem/disk fields written by the serializer.
 *
 * @param inspector the Slime object holding one cluster's info fields
 * @return the reconstructed ClusterInfo
 */
private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
    String flavor = inspector.field(clusterInfoFlavorField).asString();
    // Cost is stored as a long but modeled as an int
    int cost = (int)inspector.field(clusterInfoCostField).asLong();
    String type = inspector.field(clusterInfoTypeField).asString();
    double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
    double flavorMem = inspector.field(clusterInfoMemField).asDouble();
    double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
    List<String> hostnames = new ArrayList<>();
    inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
    return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
}
/**
 * Serializes Application instances to and from Slime (JSON-like) form.
 *
 * NOTE(review): the field-name constants below define the stored wire format —
 * changing a constant's value breaks reading of previously stored data; confirm
 * compatibility before renaming any of them.
 */
class ApplicationSerializer {

    // Application fields
    private final String idField = "id";
    private final String deploymentSpecField = "deploymentSpecField";
    private final String validationOverridesField = "validationOverrides";
    private final String deploymentsField = "deployments";
    private final String deploymentJobsField = "deploymentJobs";
    private final String deployingField = "deployingField";
    private final String outstandingChangeField = "outstandingChangeField";

    // Deployment fields
    private final String zoneField = "zone";
    private final String environmentField = "environment";
    private final String regionField = "region";
    private final String deployTimeField = "deployTime";
    private final String applicationPackageRevisionField = "applicationPackageRevision";
    private final String applicationPackageHashField = "applicationPackageHash";
    private final String sourceRevisionField = "sourceRevision";
    private final String repositoryField = "repositoryField";
    private final String branchField = "branchField";
    private final String commitField = "commitField";

    // DeploymentJobs fields
    private final String projectIdField = "projectId";
    private final String jobStatusField = "jobStatus";
    private final String jiraIssueIdField = "jiraIssueId";

    // JobStatus fields
    private final String jobTypeField = "jobType";
    private final String errorField = "jobError";
    private final String lastTriggeredField = "lastTriggered";
    private final String lastCompletedField = "lastCompleted";
    private final String firstFailingField = "firstFailing";
    private final String lastSuccessField = "lastSuccess";

    // JobRun fields
    private final String versionField = "version";
    private final String revisionField = "revision";
    private final String atField = "at";
    private final String upgradeField = "upgrade";

    // ClusterInfo fields
    private final String clusterInfoField = "clusterInfo";
    private final String clusterInfoFlavorField = "flavor";
    private final String clusterInfoCostField = "cost";
    private final String clusterInfoCpuField = "flavorCpu";
    private final String clusterInfoMemField = "flavorMem";
    private final String clusterInfoDiskField = "flavorDisk";
    private final String clusterInfoTypeField = "clusterType";
    private final String clusterInfoHostnamesField = "hostnames";

    // ClusterUtilization fields
    private final String clusterUtilsField = "clusterUtils";
    private final String clusterUtilsCpuField = "cpu";
    private final String clusterUtilsMemField = "mem";
    private final String clusterUtilsDiskField = "disk";
    private final String clusterUtilsDiskBusyField = "diskbusy";

    // ---------- Serialization

    /** Serializes the given application to a new Slime tree. */
    public Slime toSlime(Application application) {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setString(idField, application.id().serializedForm());
        root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
        root.setString(validationOverridesField, application.validationOverrides().xmlForm());
        deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField));
        toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField));
        toSlime(application.deploying(), root);
        root.setBool(outstandingChangeField, application.hasOutstandingChange());
        return slime;
    }

    private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
        for (Deployment deployment : deployments)
            deploymentToSlime(deployment, array.addObject());
    }

    private void deploymentToSlime(Deployment deployment, Cursor object) {
        zoneToSlime(deployment.zone(), object.setObject(zoneField));
        object.setString(versionField, deployment.version().toString());
        object.setLong(deployTimeField, deployment.at().toEpochMilli());
        toSlime(deployment.revision(), object.setObject(applicationPackageRevisionField));
        clusterInfoToSlime(deployment.clusterInfo(), object);
        clusterUtilsToSlime(deployment.clusterUtils(), object);
    }

    // Writes one object per cluster, keyed by the cluster id
    private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
        Cursor root = object.setObject(clusterInfoField);
        for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
            toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
        }
    }

    private void toSlime(ClusterInfo info, Cursor object) {
        object.setString(clusterInfoFlavorField, info.getFlavor());
        object.setLong(clusterInfoCostField, info.getFlavorCost());
        object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
        object.setDouble(clusterInfoMemField, info.getFlavorMem());
        object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
        object.setString(clusterInfoTypeField, info.getClusterType().name());
        Cursor array = object.setArray(clusterInfoHostnamesField);
        for (String host : info.getHostnames()) {
            array.addString(host);
        }
    }

    // Writes one object per cluster, keyed by the cluster id
    private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) {
        Cursor root = object.setObject(clusterUtilsField);
        for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) {
            toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
        }
    }

    private void toSlime(ClusterUtilization utils, Cursor object) {
        object.setDouble(clusterUtilsCpuField, utils.getCpu());
        object.setDouble(clusterUtilsMemField, utils.getMemory());
        object.setDouble(clusterUtilsDiskField, utils.getDisk());
        object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy());
    }

    private void zoneToSlime(Zone zone, Cursor object) {
        object.setString(environmentField, zone.environment().value());
        object.setString(regionField, zone.region().value());
    }

    // Source revision is optional: only written when present
    private void toSlime(ApplicationRevision applicationRevision, Cursor object) {
        object.setString(applicationPackageHashField, applicationRevision.id());
        if (applicationRevision.source().isPresent())
            toSlime(applicationRevision.source().get(), object.setObject(sourceRevisionField));
    }

    private void toSlime(SourceRevision sourceRevision, Cursor object) {
        object.setString(repositoryField, sourceRevision.repository());
        object.setString(branchField, sourceRevision.branch());
        object.setString(commitField, sourceRevision.commit());
    }

    // Non-positive project ids are treated as absent (mirrors the filter on read)
    private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) {
        deploymentJobs.projectId()
                .filter(id -> id > 0)
                .ifPresent(projectId -> cursor.setLong(projectIdField, projectId));
        jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField));
        deploymentJobs.jiraIssueId().ifPresent(jiraIssueId -> cursor.setString(jiraIssueIdField, jiraIssueId));
    }

    private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) {
        for (JobStatus jobStatus : jobStatuses)
            toSlime(jobStatus, jobStatusArray.addObject());
    }

    private void toSlime(JobStatus jobStatus, Cursor object) {
        object.setString(jobTypeField, jobStatus.type().id());
        if (jobStatus.jobError().isPresent())
            object.setString(errorField, jobStatus.jobError().get().name());
        jobRunToSlime(jobStatus.lastTriggered(), object, lastTriggeredField);
        jobRunToSlime(jobStatus.lastCompleted(), object, lastCompletedField);
        jobRunToSlime(jobStatus.firstFailing(), object, firstFailingField);
        jobRunToSlime(jobStatus.lastSuccess(), object, lastSuccessField);
    }

    // Absent runs are simply not written; the reader returns Optional.empty for them
    private void jobRunToSlime(Optional<JobStatus.JobRun> jobRun, Cursor parent, String jobRunObjectName) {
        if ( ! jobRun.isPresent()) return;
        Cursor object = parent.setObject(jobRunObjectName);
        object.setString(versionField, jobRun.get().version().toString());
        if ( jobRun.get().revision().isPresent())
            toSlime(jobRun.get().revision().get(), object.setObject(revisionField));
        object.setBool(upgradeField, jobRun.get().upgrade());
        object.setLong(atField, jobRun.get().at().toEpochMilli());
    }

    // A version change writes versionField; an application change writes the revision fields
    private void toSlime(Optional<Change> deploying, Cursor parentObject) {
        if ( ! deploying.isPresent()) return;
        Cursor object = parentObject.setObject(deployingField);
        if (deploying.get() instanceof Change.VersionChange)
            object.setString(versionField, ((Change.VersionChange)deploying.get()).version().toString());
        else if (((Change.ApplicationChange)deploying.get()).revision().isPresent())
            toSlime(((Change.ApplicationChange)deploying.get()).revision().get(), object);
    }

    // ---------- Deserialization

    /** Reconstructs an application from its Slime form. */
    public Application fromSlime(Slime slime) {
        Inspector root = slime.get();
        ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString());
        DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString());
        ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
        List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField));
        DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField));
        Optional<Change> deploying = changeFromSlime(root.field(deployingField));
        boolean outstandingChange = root.field(outstandingChangeField).asBool();
        return new Application(id, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange);
    }

    private List<Deployment> deploymentsFromSlime(Inspector array) {
        List<Deployment> deployments = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item)));
        return deployments;
    }

    private Deployment deploymentFromSlime(Inspector deploymentObject) {
        // NOTE(review): .get() assumes applicationPackageRevision is always present in
        // stored deployments — confirm against the writer, which always sets it
        return new Deployment(zoneFromSlime(deploymentObject.field(zoneField)),
                              applicationRevisionFromSlime(deploymentObject.field(applicationPackageRevisionField)).get(),
                              Version.fromString(deploymentObject.field(versionField).asString()),
                              Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
                              clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)),
                              clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)));
    }

    private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
        Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
        object.traverse((String name, Inspector obect) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(obect)));
        return map;
    }

    private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) {
        Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>();
        object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value)));
        return map;
    }

    private ClusterUtilization clusterUtililzationFromSlime(Inspector object) {
        double cpu = object.field(clusterUtilsCpuField).asDouble();
        double mem = object.field(clusterUtilsMemField).asDouble();
        double disk = object.field(clusterUtilsDiskField).asDouble();
        double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble();
        // Constructor order is (mem, cpu, disk, diskBusy) — intentionally not field order
        return new ClusterUtilization(mem, cpu, disk, diskBusy);
    }

    private Zone zoneFromSlime(Inspector object) {
        return new Zone(Environment.from(object.field(environmentField).asString()),
                        RegionName.from(object.field(regionField).asString()));
    }

    // Returns empty only when the object itself is missing; a present object always yields a revision
    private Optional<ApplicationRevision> applicationRevisionFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        String applicationPackageHash = object.field(applicationPackageHashField).asString();
        Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
        return sourceRevision.isPresent()
                ? Optional.of(ApplicationRevision.from(applicationPackageHash, sourceRevision.get()))
                : Optional.of(ApplicationRevision.from(applicationPackageHash));
    }

    private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new SourceRevision(object.field(repositoryField).asString(),
                                              object.field(branchField).asString(),
                                              object.field(commitField).asString()));
    }

    private DeploymentJobs deploymentJobsFromSlime(Inspector object) {
        // Non-positive stored ids are treated as absent (mirrors the filter on write)
        Optional<Long> projectId = optionalLong(object.field(projectIdField))
                .filter(id -> id > 0);
        List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField));
        Optional<String> jiraIssueKey = optionalString(object.field(jiraIssueIdField));
        return new DeploymentJobs(projectId, jobStatusList, jiraIssueKey);
    }

    // Mirrors toSlime(Optional<Change>, Cursor): a version field means a version change,
    // a revision means a known application change, otherwise an unknown application change
    private Optional<Change> changeFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        Inspector versionFieldValue = object.field(versionField);
        if (versionFieldValue.valid())
            return Optional.of(new Change.VersionChange(Version.fromString(versionFieldValue.asString())));
        else if (object.field(applicationPackageHashField).valid())
            return Optional.of(Change.ApplicationChange.of(applicationRevisionFromSlime(object).get()));
        else
            return Optional.of(Change.ApplicationChange.unknown());
    }

    private List<JobStatus> jobStatusListFromSlime(Inspector array) {
        List<JobStatus> jobStatusList = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusList.add(jobStatusFromSlime(item)));
        return jobStatusList;
    }

    private JobStatus jobStatusFromSlime(Inspector object) {
        DeploymentJobs.JobType jobType = DeploymentJobs.JobType.fromId(object.field(jobTypeField).asString());
        Optional<JobError> jobError = Optional.empty();
        if (object.field(errorField).valid())
            jobError = Optional.of(JobError.valueOf(object.field(errorField).asString()));
        return new JobStatus(jobType, jobError,
                             jobRunFromSlime(object.field(lastTriggeredField)),
                             jobRunFromSlime(object.field(lastCompletedField)),
                             jobRunFromSlime(object.field(firstFailingField)),
                             jobRunFromSlime(object.field(lastSuccessField)));
    }

    private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new JobStatus.JobRun(new Version(object.field(versionField).asString()),
                                                applicationRevisionFromSlime(object.field(revisionField)),
                                                object.field(upgradeField).asBool(),
                                                Instant.ofEpochMilli(object.field(atField).asLong())));
    }

    private Optional<Long> optionalLong(Inspector field) {
        return field.valid() ? Optional.of(field.asLong()) : Optional.empty();
    }

    private Optional<String> optionalString(Inspector field) {
        return SlimeUtils.optionalString(field);
    }
}
class ApplicationSerializer { private final String idField = "id"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String outstandingChangeField = "outstandingChangeField"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String applicationPackageHashField = "applicationPackageHash"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String jiraIssueIdField = "jiraIssueId"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String versionField = "version"; private final String revisionField = "revision"; private final String atField = "at"; private final String upgradeField = "upgrade"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField 
= "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.deploying(), root); root.setBool(outstandingChangeField, application.hasOutstandingChange()); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.revision(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, 
Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneToSlime(Zone zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationRevision applicationRevision, Cursor object) { object.setString(applicationPackageHashField, applicationRevision.id()); if (applicationRevision.source().isPresent()) toSlime(applicationRevision.source().get(), object.setObject(sourceRevisionField)); } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId() .filter(id -> id > 0) .ifPresent(projectId -> 
cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.jiraIssueId().ifPresent(jiraIssueId -> cursor.setString(jiraIssueIdField, jiraIssueId)); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().id()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobRunToSlime(jobStatus.lastTriggered(), object, lastTriggeredField); jobRunToSlime(jobStatus.lastCompleted(), object, lastCompletedField); jobRunToSlime(jobStatus.firstFailing(), object, firstFailingField); jobRunToSlime(jobStatus.lastSuccess(), object, lastSuccessField); } private void jobRunToSlime(Optional<JobStatus.JobRun> jobRun, Cursor parent, String jobRunObjectName) { if ( ! jobRun.isPresent()) return; Cursor object = parent.setObject(jobRunObjectName); object.setString(versionField, jobRun.get().version().toString()); if ( jobRun.get().revision().isPresent()) toSlime(jobRun.get().revision().get(), object.setObject(revisionField)); object.setBool(upgradeField, jobRun.get().upgrade()); object.setLong(atField, jobRun.get().at().toEpochMilli()); } private void toSlime(Optional<Change> deploying, Cursor parentObject) { if ( ! 
deploying.isPresent()) return; Cursor object = parentObject.setObject(deployingField); if (deploying.get() instanceof Change.VersionChange) object.setString(versionField, ((Change.VersionChange)deploying.get()).version().toString()); else if (((Change.ApplicationChange)deploying.get()).revision().isPresent()) toSlime(((Change.ApplicationChange)deploying.get()).revision().get(), object); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString()); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Optional<Change> deploying = changeFromSlime(root.field(deployingField)); boolean outstandingChange = root.field(outstandingChangeField).asBool(); return new Application(id, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneFromSlime(deploymentObject.field(zoneField)), applicationRevisionFromSlime(deploymentObject.field(applicationPackageRevisionField)).get(), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField))); } private Map<ClusterSpec.Id, 
ClusterInfo> clusterInfoMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector obect) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(obect))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private Zone zoneFromSlime(Inspector object) { return new Zone(Environment.from(object.field(environmentField).asString()), RegionName.from(object.field(regionField).asString())); } private Optional<ApplicationRevision> applicationRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); String applicationPackageHash = object.field(applicationPackageHashField).asString(); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); return sourceRevision.isPresent() ? Optional.of(ApplicationRevision.from(applicationPackageHash, sourceRevision.get())) : Optional.of(ApplicationRevision.from(applicationPackageHash)); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { Optional<Long> projectId = optionalLong(object.field(projectIdField)) .filter(id -> id > 0); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<String> jiraIssueKey = optionalString(object.field(jiraIssueIdField)); return new DeploymentJobs(projectId, jobStatusList, jiraIssueKey); } private Optional<Change> changeFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); Inspector versionFieldValue = object.field(versionField); if (versionFieldValue.valid()) return Optional.of(new Change.VersionChange(Version.fromString(versionFieldValue.asString()))); else if (object.field(applicationPackageHashField).valid()) return Optional.of(Change.ApplicationChange.of(applicationRevisionFromSlime(object).get())); else return Optional.of(Change.ApplicationChange.unknown()); } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusList.add(jobStatusFromSlime(item))); return jobStatusList; } private JobStatus jobStatusFromSlime(Inspector object) { DeploymentJobs.JobType jobType = DeploymentJobs.JobType.fromId(object.field(jobTypeField).asString()); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return new JobStatus(jobType, jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(new Version(object.field(versionField).asString()), applicationRevisionFromSlime(object.field(revisionField)), object.field(upgradeField).asBool(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private Optional<Long> optionalLong(Inspector field) { return field.valid() ? Optional.of(field.asLong()) : Optional.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } }
"lock" is already in the path so no need for it in the node name imho
public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), defaultLockTimeout); }
return lock(root.append("locks").append("vespaServerPoolLock"), defaultLockTimeout);
public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), Duration.ofSeconds(1)); }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofMinutes(30)); } public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), defaultLockTimeout); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), Duration.ofSeconds(1)); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
likewise
public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), defaultLockTimeout); }
return lock(root.append("locks").append("openStackServerPoolLock"), defaultLockTimeout);
public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), Duration.ofSeconds(1)); }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofMinutes(30)); } public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), defaultLockTimeout); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), Duration.ofSeconds(1)); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
Also, updating the server pool should never take more than a few seconds at most, as it's always just writing data to ZK - so we could probably use a much shorter lock timeout
public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), defaultLockTimeout); }
return lock(root.append("locks").append("vespaServerPoolLock"), defaultLockTimeout);
public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), Duration.ofSeconds(1)); }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofMinutes(30)); } public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), defaultLockTimeout); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), Duration.ofSeconds(1)); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
Guess it makes sense to have short timeout, but I'll keep path the same so at least its consistent with the already existing locks.
public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), defaultLockTimeout); }
return lock(root.append("locks").append("vespaServerPoolLock"), defaultLockTimeout);
public Lock lockVespaServerPool() { return lock(root.append("locks").append("vespaServerPoolLock"), Duration.ofSeconds(1)); }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofMinutes(30)); } public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), defaultLockTimeout); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
class CuratorDb { /** Use a nonstandard zk port to avoid interfering with connection to the config server zk cluster */ private static final int zooKeeperPort = 2281; private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Path root = Path.fromString("/controller/v1"); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private final StringSetSerializer stringSetSerializer = new StringSetSerializer(); private final JobQueueSerializer jobQueueSerializer = new JobQueueSerializer(); @SuppressWarnings("unused") private final ZooKeeperServer zooKeeperServer; private final Curator curator; /** * All keys, to allow reentrancy. * This will grow forever, but this should be too slow to be a problem. */ private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>(); /** Create a curator db which also set up a ZooKeeper server (such that this instance is both client and server) */ @Inject public CuratorDb(ClusterInfoConfig clusterInfo) { this.zooKeeperServer = new ZooKeeperServer(toZookeeperServerConfig(clusterInfo)); this.curator = new Curator(toConnectionSpec(clusterInfo)); } /** Create a curator db which does not set up a server, using the given Curator instance */ protected CuratorDb(Curator curator) { this.zooKeeperServer = null; this.curator = curator; } private static ZookeeperServerConfig toZookeeperServerConfig(ClusterInfoConfig clusterInfo) { ZookeeperServerConfig.Builder b = new ZookeeperServerConfig.Builder(); b.zooKeeperConfigFile("conf/zookeeper/controller-zookeeper.cfg"); b.dataDir("var/controller-zookeeper"); b.clientPort(zooKeeperPort); b.myidFile("var/controller-zookeeper/myid"); b.myid(myIndex(clusterInfo)); for (ClusterInfoConfig.Services clusterMember : clusterInfo.services()) { ZookeeperServerConfig.Server.Builder server = new ZookeeperServerConfig.Server.Builder(); server.id(clusterMember.index()); server.hostname(clusterMember.hostname()); server.quorumPort(zooKeeperPort + 
1); server.electionPort(zooKeeperPort + 2); b.server(server); } return new ZookeeperServerConfig(b); } private static Integer myIndex(ClusterInfoConfig clusterInfo) { String hostname = HostName.getLocalhost(); return clusterInfo.services().stream() .filter(service -> service.hostname().equals(hostname)) .map(ClusterInfoConfig.Services::index) .findFirst() .orElseThrow(() -> new IllegalStateException("Unable to find index for this node by hostname '" + hostname + "'")); } private static String toConnectionSpec(ClusterInfoConfig clusterInfo) { return clusterInfo.services().stream() .map(member -> member.hostname() + ":" + zooKeeperPort) .collect(Collectors.joining(",")); } public Lock lock(TenantId id, Duration timeout) { return lock(lockPath(id), timeout); } public Lock lock(ApplicationId id, Duration timeout) { return lock(lockPath(id), timeout); } /** Create a reentrant lock */ private Lock lock(Path path, Duration timeout) { Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator)); lock.acquire(timeout); return lock; } public Lock lockInactiveJobs() { return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout); } public Lock lockJobQueues() { return lock(root.append("locks").append("jobQueuesLock"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1)); } public Lock lockProvisionState(String provisionStateId) { return lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Lock lockOpenStackServerPool() { return lock(root.append("locks").append("openStackServerPoolLock"), Duration.ofSeconds(1)); } public Set<String> readInactiveJobs() { try { Optional<byte[]> data = curator.getData(inactiveJobsPath()); if (! 
data.isPresent() || data.get().length == 0) return new HashSet<>(); return stringSetSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new HashSet<>(); } } public void writeInactiveJobs(Set<String> inactiveJobs) { NestedTransaction transaction = new NestedTransaction(); curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs)); transaction.commit(); } public Deque<ApplicationId> readJobQueue(DeploymentJobs.JobType jobType) { try { Optional<byte[]> data = curator.getData(jobQueuePath(jobType)); if (! data.isPresent() || data.get().length == 0) return new ArrayDeque<>(); return jobQueueSerializer.fromJson(data.get()); } catch (RuntimeException e) { log.log(Level.WARNING, "Error reading job queue, deleting inactive state"); writeInactiveJobs(Collections.emptySet()); return new ArrayDeque<>(); } } public void writeJobQueue(DeploymentJobs.JobType jobType, Deque<ApplicationId> queue) { NestedTransaction transaction = new NestedTransaction(); curator.set(jobQueuePath(jobType), jobQueueSerializer.toJson(queue)); transaction.commit(); } public double readUpgradesPerMinute() { Optional<byte[]> n = curator.getData(upgradesPerMinutePath()); if (!n.isPresent() || n.get().length == 0) { return 0.5; } return ByteBuffer.wrap(n.get()).getDouble(); } public void writeUpgradesPerMinute(double n) { if (n < 0) { throw new IllegalArgumentException("Upgrades per minute must be >= 0"); } NestedTransaction transaction = new NestedTransaction(); curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); transaction.commit(); } public void writeVersionStatus(VersionStatus status) { VersionStatusSerializer serializer = new VersionStatusSerializer(); NestedTransaction transaction = new NestedTransaction(); try { if (curator.getData(systemVersionPath()).isPresent()) { curator.delete(systemVersionPath()); 
} curator.set(versionStatusPath(), SlimeUtils.toJsonBytes(serializer.toSlime(status))); } catch (IOException e) { throw new UncheckedIOException("Failed to serialize version status", e); } transaction.commit(); } public VersionStatus readVersionStatus() { Optional<byte[]> data = curator.getData(versionStatusPath()); if (!data.isPresent() || data.get().length == 0) { return VersionStatus.empty(); } VersionStatusSerializer serializer = new VersionStatusSerializer(); return serializer.fromSlime(SlimeUtils.jsonToSlime(data.get())); } public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public Optional<byte[]> readVespaServerPool() { return curator.getData(vespaServerPoolPath()); } public void writeVespaServerPool(byte[] data) { curator.set(vespaServerPoolPath(), data); } public Optional<byte[]> readOpenStackServerPool() { return curator.getData(openStackServerPoolPath()); } public void writeOpenStackServerPool(byte[] data) { curator.set(openStackServerPoolPath(), data); } private Path systemVersionPath() { return root.append("systemVersion"); } private Path lockPath(TenantId tenant) { Path lockPath = root.append("locks") .append(tenant.id()); curator.create(lockPath); return lockPath; } private Path lockPath(ApplicationId application) { Path lockPath = root.append("locks") .append(application.tenant().value()) .append(application.application().value()) .append(application.instance().value()); curator.create(lockPath); return lockPath; } private Path lockPath(String provisionId) { Path lockPath = root.append("locks") .append(provisionStatePath()); curator.create(lockPath); return lockPath; } private Path inactiveJobsPath() { return root.append("inactiveJobs"); } private Path 
jobQueuePath(DeploymentJobs.JobType jobType) { return root.append("jobQueues").append(jobType.name()); } private Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private Path versionStatusPath() { return root.append("versionStatus"); } private Path provisionStatePath() { return root.append("provisioning").append("states"); } private Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private Path vespaServerPoolPath() { return root.append("vespaServerPool"); } private Path openStackServerPoolPath() { return root.append("openStackServerPool"); } }
Ah, right. This `finally` block is confusing me. The idea is that we want to remove the temp file if we fail to create `child`? Why not put this in `catch` and remove the `if (temporaryFile != null)` check?
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); ChildProcess2Impl child = new ChildProcess2Impl(commandLine, process, temporaryFile, timer); temporaryFile = null; return child; } finally { if (temporaryFile != null) { try { Files.delete(temporaryFile); } catch (IOException e) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, e); } } } }
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); return new ChildProcess2Impl(commandLine, process, temporaryFile, timer); } catch (RuntimeException | Error throwable) { try { Files.delete(temporaryFile); } catch (IOException ioException) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, ioException); } throw throwable; } }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
Should you not check if term or field is of type filter?
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) { Recall recall = Recall.UNKNOWN_RECALL; for (int i = item.getItemCount(); --i >= 0; ) { switch (optimizeByRestrict(item.getItem(i), restrictParam)) { case RECALLS_EVERYTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { recall = Recall.RECALLS_EVERYTHING; } else if ((item instanceof AndItem) || (item instanceof NearItem)) { item.removeItem(i); } else if (item instanceof RankItem) { } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; case RECALLS_NOTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { item.removeItem(i); } else if ((item instanceof AndItem) || (item instanceof NearItem)) { return Recall.RECALLS_NOTHING; } else if (item instanceof RankItem) { item.removeItem(i); } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; } } return recall; }
recall = Recall.RECALLS_EVERYTHING;
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) { Recall recall = Recall.UNKNOWN_RECALL; for (int i = item.getItemCount(); --i >= 0; ) { switch (optimizeByRestrict(item.getItem(i), restrictParam)) { case RECALLS_EVERYTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { removeOtherNonrankedChildren(item, i); recall = Recall.RECALLS_EVERYTHING; } else if ((item instanceof AndItem) || (item instanceof NearItem)) { item.removeItem(i); } else if (item instanceof RankItem) { } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; case RECALLS_NOTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { item.removeItem(i); } else if ((item instanceof AndItem) || (item instanceof NearItem)) { return Recall.RECALLS_NOTHING; } else if (item instanceof RankItem) { item.removeItem(i); } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; } } return recall; }
class QueryRewrite { private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL } /** * Optimize multiple NotItems under and or by collapsing them in to one and leaving * the positive ones behind in its place and moving itself with the original and as its positive item * and the union of all the negative items of all the original NotItems as its negative items. */ public static void optimizeAndNot(Query query) { Item root = query.getModel().getQueryTree().getRoot(); Item possibleNewRoot = optimizeAndNot(root); if (root != possibleNewRoot) { query.getModel().getQueryTree().setRoot(possibleNewRoot); } } /** * Optimizes the given query tree based on its {@link Model */ public static void optimizeByRestrict(Query query) { if (query.getModel().getRestrict().size() != 1) { return; } Item root = query.getModel().getQueryTree().getRoot(); if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) { query.getModel().getQueryTree().setRoot(new NullItem()); } } /** * Collapses all single-child {@link CompositeItem}s into their parent item. */ public static void collapseSingleComposites(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = collapseSingleComposites(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } /** * Replaces and {@link SimpleIndexedItem} searching in the {@link Hit * appropriate for the search node. 
*/ public static void rewriteSddocname(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = rewriteSddocname(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } private static Item optimizeAndNot(Item node) { if (node instanceof CompositeItem) { return extractAndNotRecursively((CompositeItem) node); } return node; } private static CompositeItem extractAndNotRecursively(CompositeItem parent) { for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); Item possibleNewChild = optimizeAndNot(child); if (child != possibleNewChild) { parent.setItem(i, possibleNewChild); } } if (parent instanceof AndItem) { return extractAndNot((AndItem) parent); } return parent; } private static CompositeItem extractAndNot(AndItem parent) { NotItem theOnlyNot = null; for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); if (child instanceof NotItem) { NotItem thisNot = (NotItem) child; parent.setItem(i, thisNot.getPositiveItem()); if (theOnlyNot == null) { theOnlyNot = thisNot; theOnlyNot.setPositiveItem(parent); } else { for (int j=1; j < thisNot.getItemCount(); j++) { theOnlyNot.addNegativeItem(thisNot.getItem(j)); } } } } return (theOnlyNot != null) ? 
theOnlyNot : parent; } private static Recall optimizeByRestrict(Item item, String restrictParam) { if (item instanceof SimpleIndexedItem) { return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam); } else if (item instanceof NotItem) { return optimizeNotItemByRestrict((NotItem)item, restrictParam); } else if (item instanceof CompositeItem) { return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam); } else { return Recall.UNKNOWN_RECALL; } } private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) { if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) { return Recall.UNKNOWN_RECALL; } return restrictParam.equals(item.getIndexedString()) ? Recall.RECALLS_EVERYTHING : Recall.RECALLS_NOTHING; } private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) { if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) { return Recall.RECALLS_NOTHING; } for (int i = item.getItemCount(); --i >= 1; ) { Item child = item.getItem(i); switch (optimizeByRestrict(child, restrictParam)) { case RECALLS_EVERYTHING: return Recall.RECALLS_NOTHING; case RECALLS_NOTHING: item.removeItem(i); break; } } return Recall.UNKNOWN_RECALL; } private static Item collapseSingleComposites(Item item) { if (!(item instanceof CompositeItem)) { return item; } CompositeItem parent = (CompositeItem)item; int numChildren = parent.getItemCount(); for (int i = 0; i < numChildren; ++i) { Item oldChild = parent.getItem(i); Item newChild = collapseSingleComposites(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } return numChildren == 1 ? 
parent.getItem(0) : item; } private static Item rewriteSddocname(Item item) { if (item instanceof CompositeItem) { CompositeItem parent = (CompositeItem)item; for (int i = 0, len = parent.getItemCount(); i < len; ++i) { Item oldChild = parent.getItem(i); Item newChild = rewriteSddocname(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } } else if (item instanceof SimpleIndexedItem) { SimpleIndexedItem oldItem = (SimpleIndexedItem)item; if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) { SubstringItem newItem = new SubstringItem(oldItem.getIndexedString()); newItem.setIndexName("[documentmetastore]"); return newItem; } } return item; } }
class QueryRewrite { private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL } /** * Optimize multiple NotItems under and or by collapsing them in to one and leaving * the positive ones behind in its place and moving itself with the original and as its positive item * and the union of all the negative items of all the original NotItems as its negative items. */ public static void optimizeAndNot(Query query) { Item root = query.getModel().getQueryTree().getRoot(); Item possibleNewRoot = optimizeAndNot(root); if (root != possibleNewRoot) { query.getModel().getQueryTree().setRoot(possibleNewRoot); } } /** * Optimizes the given query tree based on its {@link Model */ public static void optimizeByRestrict(Query query) { if (query.getModel().getRestrict().size() != 1) { return; } Item root = query.getModel().getQueryTree().getRoot(); if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) { query.getModel().getQueryTree().setRoot(new NullItem()); } } /** * Collapses all single-child {@link CompositeItem}s into their parent item. */ public static void collapseSingleComposites(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = collapseSingleComposites(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } /** * Replaces and {@link SimpleIndexedItem} searching in the {@link Hit * appropriate for the search node. 
*/ public static void rewriteSddocname(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = rewriteSddocname(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } private static Item optimizeAndNot(Item node) { if (node instanceof CompositeItem) { return extractAndNotRecursively((CompositeItem) node); } return node; } private static CompositeItem extractAndNotRecursively(CompositeItem parent) { for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); Item possibleNewChild = optimizeAndNot(child); if (child != possibleNewChild) { parent.setItem(i, possibleNewChild); } } if (parent instanceof AndItem) { return extractAndNot((AndItem) parent); } return parent; } private static CompositeItem extractAndNot(AndItem parent) { NotItem theOnlyNot = null; for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); if (child instanceof NotItem) { NotItem thisNot = (NotItem) child; parent.setItem(i, thisNot.getPositiveItem()); if (theOnlyNot == null) { theOnlyNot = thisNot; theOnlyNot.setPositiveItem(parent); } else { for (int j=1; j < thisNot.getItemCount(); j++) { theOnlyNot.addNegativeItem(thisNot.getItem(j)); } } } } return (theOnlyNot != null) ? 
theOnlyNot : parent; } private static Recall optimizeByRestrict(Item item, String restrictParam) { if (item instanceof SimpleIndexedItem) { return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam); } else if (item instanceof NotItem) { return optimizeNotItemByRestrict((NotItem)item, restrictParam); } else if (item instanceof CompositeItem) { return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam); } else { return Recall.UNKNOWN_RECALL; } } private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) { if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) { return Recall.UNKNOWN_RECALL; } return restrictParam.equals(item.getIndexedString()) ? Recall.RECALLS_EVERYTHING : Recall.RECALLS_NOTHING; } private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) { if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) { return Recall.RECALLS_NOTHING; } for (int i = item.getItemCount(); --i >= 1; ) { Item child = item.getItem(i); switch (optimizeByRestrict(child, restrictParam)) { case RECALLS_EVERYTHING: return Recall.RECALLS_NOTHING; case RECALLS_NOTHING: item.removeItem(i); break; } } return Recall.UNKNOWN_RECALL; } private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) { Item childToKeep = parent.getItem(indexOfChildToKeep); for (int i = parent.getItemCount(); --i >= 0; ) { Item child = parent.getItem(i); if ( child != childToKeep && ! parent.getItem(i).isRanked()) parent.removeItem(i); } } private static Item collapseSingleComposites(Item item) { if (!(item instanceof CompositeItem)) { return item; } CompositeItem parent = (CompositeItem)item; int numChildren = parent.getItemCount(); for (int i = 0; i < numChildren; ++i) { Item oldChild = parent.getItem(i); Item newChild = collapseSingleComposites(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } return numChildren == 1 ? 
parent.getItem(0) : item; } private static Item rewriteSddocname(Item item) { if (item instanceof CompositeItem) { CompositeItem parent = (CompositeItem)item; for (int i = 0, len = parent.getItemCount(); i < len; ++i) { Item oldChild = parent.getItem(i); Item newChild = rewriteSddocname(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } } else if (item instanceof SimpleIndexedItem) { SimpleIndexedItem oldItem = (SimpleIndexedItem)item; if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) { SubstringItem newItem = new SubstringItem(oldItem.getIndexedString()); newItem.setIndexName("[documentmetastore]"); return newItem; } } return item; } }
Do you mean, keep removing them if isRanked is false?
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) { Recall recall = Recall.UNKNOWN_RECALL; for (int i = item.getItemCount(); --i >= 0; ) { switch (optimizeByRestrict(item.getItem(i), restrictParam)) { case RECALLS_EVERYTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { recall = Recall.RECALLS_EVERYTHING; } else if ((item instanceof AndItem) || (item instanceof NearItem)) { item.removeItem(i); } else if (item instanceof RankItem) { } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; case RECALLS_NOTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { item.removeItem(i); } else if ((item instanceof AndItem) || (item instanceof NearItem)) { return Recall.RECALLS_NOTHING; } else if (item instanceof RankItem) { item.removeItem(i); } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; } } return recall; }
recall = Recall.RECALLS_EVERYTHING;
private static Recall optimizeCompositeItemByRestrict(CompositeItem item, String restrictParam) { Recall recall = Recall.UNKNOWN_RECALL; for (int i = item.getItemCount(); --i >= 0; ) { switch (optimizeByRestrict(item.getItem(i), restrictParam)) { case RECALLS_EVERYTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { removeOtherNonrankedChildren(item, i); recall = Recall.RECALLS_EVERYTHING; } else if ((item instanceof AndItem) || (item instanceof NearItem)) { item.removeItem(i); } else if (item instanceof RankItem) { } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; case RECALLS_NOTHING: if ((item instanceof OrItem) || (item instanceof EquivItem)) { item.removeItem(i); } else if ((item instanceof AndItem) || (item instanceof NearItem)) { return Recall.RECALLS_NOTHING; } else if (item instanceof RankItem) { item.removeItem(i); } else { throw new UnsupportedOperationException(item.getClass().getName()); } break; } } return recall; }
class QueryRewrite { private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL } /** * Optimize multiple NotItems under and or by collapsing them in to one and leaving * the positive ones behind in its place and moving itself with the original and as its positive item * and the union of all the negative items of all the original NotItems as its negative items. */ public static void optimizeAndNot(Query query) { Item root = query.getModel().getQueryTree().getRoot(); Item possibleNewRoot = optimizeAndNot(root); if (root != possibleNewRoot) { query.getModel().getQueryTree().setRoot(possibleNewRoot); } } /** * Optimizes the given query tree based on its {@link Model */ public static void optimizeByRestrict(Query query) { if (query.getModel().getRestrict().size() != 1) { return; } Item root = query.getModel().getQueryTree().getRoot(); if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) { query.getModel().getQueryTree().setRoot(new NullItem()); } } /** * Collapses all single-child {@link CompositeItem}s into their parent item. */ public static void collapseSingleComposites(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = collapseSingleComposites(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } /** * Replaces and {@link SimpleIndexedItem} searching in the {@link Hit * appropriate for the search node. 
*/ public static void rewriteSddocname(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = rewriteSddocname(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } private static Item optimizeAndNot(Item node) { if (node instanceof CompositeItem) { return extractAndNotRecursively((CompositeItem) node); } return node; } private static CompositeItem extractAndNotRecursively(CompositeItem parent) { for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); Item possibleNewChild = optimizeAndNot(child); if (child != possibleNewChild) { parent.setItem(i, possibleNewChild); } } if (parent instanceof AndItem) { return extractAndNot((AndItem) parent); } return parent; } private static CompositeItem extractAndNot(AndItem parent) { NotItem theOnlyNot = null; for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); if (child instanceof NotItem) { NotItem thisNot = (NotItem) child; parent.setItem(i, thisNot.getPositiveItem()); if (theOnlyNot == null) { theOnlyNot = thisNot; theOnlyNot.setPositiveItem(parent); } else { for (int j=1; j < thisNot.getItemCount(); j++) { theOnlyNot.addNegativeItem(thisNot.getItem(j)); } } } } return (theOnlyNot != null) ? 
theOnlyNot : parent; } private static Recall optimizeByRestrict(Item item, String restrictParam) { if (item instanceof SimpleIndexedItem) { return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam); } else if (item instanceof NotItem) { return optimizeNotItemByRestrict((NotItem)item, restrictParam); } else if (item instanceof CompositeItem) { return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam); } else { return Recall.UNKNOWN_RECALL; } } private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) { if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) { return Recall.UNKNOWN_RECALL; } return restrictParam.equals(item.getIndexedString()) ? Recall.RECALLS_EVERYTHING : Recall.RECALLS_NOTHING; } private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) { if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) { return Recall.RECALLS_NOTHING; } for (int i = item.getItemCount(); --i >= 1; ) { Item child = item.getItem(i); switch (optimizeByRestrict(child, restrictParam)) { case RECALLS_EVERYTHING: return Recall.RECALLS_NOTHING; case RECALLS_NOTHING: item.removeItem(i); break; } } return Recall.UNKNOWN_RECALL; } private static Item collapseSingleComposites(Item item) { if (!(item instanceof CompositeItem)) { return item; } CompositeItem parent = (CompositeItem)item; int numChildren = parent.getItemCount(); for (int i = 0; i < numChildren; ++i) { Item oldChild = parent.getItem(i); Item newChild = collapseSingleComposites(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } return numChildren == 1 ? 
parent.getItem(0) : item; } private static Item rewriteSddocname(Item item) { if (item instanceof CompositeItem) { CompositeItem parent = (CompositeItem)item; for (int i = 0, len = parent.getItemCount(); i < len; ++i) { Item oldChild = parent.getItem(i); Item newChild = rewriteSddocname(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } } else if (item instanceof SimpleIndexedItem) { SimpleIndexedItem oldItem = (SimpleIndexedItem)item; if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) { SubstringItem newItem = new SubstringItem(oldItem.getIndexedString()); newItem.setIndexName("[documentmetastore]"); return newItem; } } return item; } }
class QueryRewrite { private enum Recall { RECALLS_EVERYTHING, RECALLS_NOTHING, UNKNOWN_RECALL } /** * Optimize multiple NotItems under and or by collapsing them in to one and leaving * the positive ones behind in its place and moving itself with the original and as its positive item * and the union of all the negative items of all the original NotItems as its negative items. */ public static void optimizeAndNot(Query query) { Item root = query.getModel().getQueryTree().getRoot(); Item possibleNewRoot = optimizeAndNot(root); if (root != possibleNewRoot) { query.getModel().getQueryTree().setRoot(possibleNewRoot); } } /** * Optimizes the given query tree based on its {@link Model */ public static void optimizeByRestrict(Query query) { if (query.getModel().getRestrict().size() != 1) { return; } Item root = query.getModel().getQueryTree().getRoot(); if (optimizeByRestrict(root, query.getModel().getRestrict().iterator().next()) == Recall.RECALLS_NOTHING) { query.getModel().getQueryTree().setRoot(new NullItem()); } } /** * Collapses all single-child {@link CompositeItem}s into their parent item. */ public static void collapseSingleComposites(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = collapseSingleComposites(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } /** * Replaces and {@link SimpleIndexedItem} searching in the {@link Hit * appropriate for the search node. 
*/ public static void rewriteSddocname(Query query) { Item oldRoot = query.getModel().getQueryTree().getRoot(); Item newRoot = rewriteSddocname(oldRoot); if (oldRoot != newRoot) { query.getModel().getQueryTree().setRoot(newRoot); } } private static Item optimizeAndNot(Item node) { if (node instanceof CompositeItem) { return extractAndNotRecursively((CompositeItem) node); } return node; } private static CompositeItem extractAndNotRecursively(CompositeItem parent) { for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); Item possibleNewChild = optimizeAndNot(child); if (child != possibleNewChild) { parent.setItem(i, possibleNewChild); } } if (parent instanceof AndItem) { return extractAndNot((AndItem) parent); } return parent; } private static CompositeItem extractAndNot(AndItem parent) { NotItem theOnlyNot = null; for (int i = 0; i < parent.getItemCount(); i++) { Item child = parent.getItem(i); if (child instanceof NotItem) { NotItem thisNot = (NotItem) child; parent.setItem(i, thisNot.getPositiveItem()); if (theOnlyNot == null) { theOnlyNot = thisNot; theOnlyNot.setPositiveItem(parent); } else { for (int j=1; j < thisNot.getItemCount(); j++) { theOnlyNot.addNegativeItem(thisNot.getItem(j)); } } } } return (theOnlyNot != null) ? 
theOnlyNot : parent; } private static Recall optimizeByRestrict(Item item, String restrictParam) { if (item instanceof SimpleIndexedItem) { return optimizeIndexedItemByRestrict((SimpleIndexedItem)item, restrictParam); } else if (item instanceof NotItem) { return optimizeNotItemByRestrict((NotItem)item, restrictParam); } else if (item instanceof CompositeItem) { return optimizeCompositeItemByRestrict((CompositeItem)item, restrictParam); } else { return Recall.UNKNOWN_RECALL; } } private static Recall optimizeIndexedItemByRestrict(SimpleIndexedItem item, String restrictParam) { if (!Hit.SDDOCNAME_FIELD.equals(item.getIndexName())) { return Recall.UNKNOWN_RECALL; } return restrictParam.equals(item.getIndexedString()) ? Recall.RECALLS_EVERYTHING : Recall.RECALLS_NOTHING; } private static Recall optimizeNotItemByRestrict(NotItem item, String restrictParam) { if (optimizeByRestrict(item.getItem(0), restrictParam) == Recall.RECALLS_NOTHING) { return Recall.RECALLS_NOTHING; } for (int i = item.getItemCount(); --i >= 1; ) { Item child = item.getItem(i); switch (optimizeByRestrict(child, restrictParam)) { case RECALLS_EVERYTHING: return Recall.RECALLS_NOTHING; case RECALLS_NOTHING: item.removeItem(i); break; } } return Recall.UNKNOWN_RECALL; } private static void removeOtherNonrankedChildren(CompositeItem parent, int indexOfChildToKeep) { Item childToKeep = parent.getItem(indexOfChildToKeep); for (int i = parent.getItemCount(); --i >= 0; ) { Item child = parent.getItem(i); if ( child != childToKeep && ! parent.getItem(i).isRanked()) parent.removeItem(i); } } private static Item collapseSingleComposites(Item item) { if (!(item instanceof CompositeItem)) { return item; } CompositeItem parent = (CompositeItem)item; int numChildren = parent.getItemCount(); for (int i = 0; i < numChildren; ++i) { Item oldChild = parent.getItem(i); Item newChild = collapseSingleComposites(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } return numChildren == 1 ? 
parent.getItem(0) : item; } private static Item rewriteSddocname(Item item) { if (item instanceof CompositeItem) { CompositeItem parent = (CompositeItem)item; for (int i = 0, len = parent.getItemCount(); i < len; ++i) { Item oldChild = parent.getItem(i); Item newChild = rewriteSddocname(oldChild); if (oldChild != newChild) { parent.setItem(i, newChild); } } } else if (item instanceof SimpleIndexedItem) { SimpleIndexedItem oldItem = (SimpleIndexedItem)item; if (Hit.SDDOCNAME_FIELD.equals(oldItem.getIndexName())) { SubstringItem newItem = new SubstringItem(oldItem.getIndexedString()); newItem.setIndexName("[documentmetastore]"); return newItem; } } return item; } }
Use log.log(Level.WARNING, "Timed out talking to YAMAS; retrying in " + maintenanceInterval()", e);
protected void maintain() { for (Application application : controller().applications().asList()) { for (Deployment deployment : application.deployments().values()) { try { MetricsService.DeploymentMetrics metrics = controller().metricsService() .getDeploymentMetrics(application.id(), deployment.zone()); DeploymentMetrics appMetrics = new DeploymentMetrics(metrics.queriesPerSecond(), metrics.writesPerSecond(), metrics.documentCount(), metrics.queryLatencyMillis(), metrics.writeLatencyMillis()); try (Lock lock = controller().applications().lock(application.id())) { controller().applications().get(application.id()).ifPresent(freshApplication -> { Deployment freshDeployment = freshApplication.deployments().get(deployment.zone()); if (freshDeployment != null) controller().applications().store(freshApplication.with(freshDeployment.withMetrics(appMetrics)), lock); }); } } catch (UncheckedIOException e) { log.warning("Timed out talking to YAMAS; retrying in " + maintenanceInterval() + ":\n" + e); } } } }
log.warning("Timed out talking to YAMAS; retrying in " + maintenanceInterval() + ":\n" + e);
protected void maintain() { for (Application application : controller().applications().asList()) { for (Deployment deployment : application.deployments().values()) { try { MetricsService.DeploymentMetrics metrics = controller().metricsService() .getDeploymentMetrics(application.id(), deployment.zone()); DeploymentMetrics appMetrics = new DeploymentMetrics(metrics.queriesPerSecond(), metrics.writesPerSecond(), metrics.documentCount(), metrics.queryLatencyMillis(), metrics.writeLatencyMillis()); try (Lock lock = controller().applications().lock(application.id())) { controller().applications().get(application.id()).ifPresent(freshApplication -> { Deployment freshDeployment = freshApplication.deployments().get(deployment.zone()); if (freshDeployment != null) controller().applications().store(freshApplication.with(freshDeployment.withMetrics(appMetrics)), lock); }); } } catch (UncheckedIOException e) { log.warning("Timed out talking to YAMAS; retrying in " + maintenanceInterval() + ":\n" + e); } } } }
class DeploymentMetricsMaintainer extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName()); DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) { super(controller, duration, jobControl); } @Override }
class DeploymentMetricsMaintainer extends Maintainer { private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName()); DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) { super(controller, duration, jobControl); } @Override }